diff --git a/.gitignore b/.gitignore
index 5982d2c642c7..759d64580fcd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,6 +26,8 @@ website/node_modules
website/vendor
+.vscode/
+
# Test exclusions
!command/test-fixtures/**/*.tfstate
!command/test-fixtures/**/.terraform/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 36bdf032e29b..b955b46e866f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,61 @@
-## 0.1.0 (Unreleased)
+## 0.1.3 (Unreleased)
+
+FEATURES:
+
+* **New Resource:** `azurerm_dns_ptr_record` [GH-141]
+* **New Resource:** `azurerm_servicebus_queue` [GH-151]
+* `azurerm_servicebus_topic` - added a `status` field to allow disabling the topic [GH-150]
+
+IMPROVEMENTS:
+
+* `azurerm_storage_table` - updating the name validation [GH-143]
+* `azurerm_virtual_machine` - making `admin_password` optional for Linux VMs [GH-154]
+
+## 0.1.2 (June 29, 2017)
+
+FEATURES:
+
+* **New Data Source:** `azurerm_managed_disk` ([#121](https://github.com/terraform-providers/terraform-provider-azurerm/issues/121))
+* **New Resource:** `azurerm_application_insights` ([#3](https://github.com/terraform-providers/terraform-provider-azurerm/issues/3))
+* **New Resource:** `azurerm_cosmosdb_account` ([#108](https://github.com/terraform-providers/terraform-provider-azurerm/issues/108))
+* `azurerm_network_interface` now supports import ([#119](https://github.com/terraform-providers/terraform-provider-azurerm/issues/119))
+
+IMPROVEMENTS:
+
+* Ensuring consistency in when storing the `location` field in the state for the `azurerm_availability_set`, `azurerm_express_route_circuit`, `azurerm_load_balancer`, `azurerm_local_network_gateway`, `azurerm_managed_disk`, `azurerm_network_security_group`,
+`azurerm_public_ip`, `azurerm_resource_group`, `azurerm_route_table`, `azurerm_storage_account`, `azurerm_virtual_machine` and `azurerm_virtual_network` resources ([#123](https://github.com/terraform-providers/terraform-provider-azurerm/issues/123))
+* `azurerm_redis_cache` - now supports backup settings for Premium Redis Caches ([#130](https://github.com/terraform-providers/terraform-provider-azurerm/issues/130))
+* `azurerm_storage_account` - exposing a formatted Connection String for Blob access ([#142](https://github.com/terraform-providers/terraform-provider-azurerm/issues/142))
+
+BUG FIXES:
+
+* `azurerm_cdn_endpoint` - fixing update of the `origin_host_header` ([#134](https://github.com/terraform-providers/terraform-provider-azurerm/issues/134))
+* `azurerm_container_service` - exposes the FQDN of the `master_profile` as a computed field ([#125](https://github.com/terraform-providers/terraform-provider-azurerm/issues/125))
+* `azurerm_key_vault` - fixing import / the validation on Access Policies ([#124](https://github.com/terraform-providers/terraform-provider-azurerm/issues/124))
+* `azurerm_network_interface` - Normalizing the location field in the state ([#122](https://github.com/terraform-providers/terraform-provider-azurerm/issues/122))
+* `azurerm_network_interface` - fixing a crash when importing a NIC with a Public IP ([#128](https://github.com/terraform-providers/terraform-provider-azurerm/issues/128))
+* `azurerm_network_security_rule`: `network_security_group_name` is now `ForceNew` ([#138](https://github.com/terraform-providers/terraform-provider-azurerm/issues/138))
+* `azurerm_subnet` now correctly detects changes to Network Security Groups and Routing Tables ([#113](https://github.com/terraform-providers/terraform-provider-azurerm/issues/113))
+* `azurerm_virtual_machine_scale_set` - making `storage_profile_os_disk`.`name` optional ([#129](https://github.com/terraform-providers/terraform-provider-azurerm/issues/129))
+
+## 0.1.1 (June 21, 2017)
+
+BUG FIXES:
+
+* Sort ResourceID.Path keys for consistent output ([#116](https://github.com/terraform-providers/terraform-provider-azurerm/issues/116))
+
+## 0.1.0 (June 20, 2017)
BACKWARDS INCOMPATIBILITIES / NOTES:
FEATURES:
-* **New Data Source:** `azurerm_resource_group` [GH-15022](https://github.com/hashicorp/terraform/pull/15022)
+* **New Data Source:** `azurerm_resource_group` ([#15022](https://github.com/hashicorp/terraform/pull/15022))
IMPROVEMENTS:
+
+* Add diff suppress func to endpoint_location ([#15094](https://github.com/hashicorp/terraform/pull/15094))
+
+BUG FIXES:
+
+* Fixing the Deadlock issue ([#6](https://github.com/terraform-providers/terraform-provider-azurerm/issues/6))
diff --git a/GNUmakefile b/GNUmakefile
index 6234687a37a2..c9eacb4511da 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,6 +1,5 @@
TEST?=$$(go list ./... |grep -v 'vendor')
GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor)
-COVER_TEST?=$$(go list ./... |grep -v 'vendor')
default: build
@@ -15,17 +14,6 @@ test: fmtcheck
testacc: fmtcheck
TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
-testrace: fmtcheck
- TF_ACC= go test -race $(TEST) $(TESTARGS)
-
-cover:
- @go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \
- go get -u golang.org/x/tools/cmd/cover; \
- fi
- go test $(COVER_TEST) -coverprofile=coverage.out
- go tool cover -html=coverage.out
- rm coverage.out
-
vet:
@echo "go vet ."
@go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \
@@ -47,12 +35,13 @@ errcheck:
vendor-status:
@govendor status
-test-compile: fmtcheck
+test-compile:
@if [ "$(TEST)" = "./..." ]; then \
echo "ERROR: Set TEST to a specific package. For example,"; \
- echo " make test-compile TEST=./builtin/providers/aws"; \
+ echo " make test-compile TEST=./aws"; \
exit 1; \
fi
go test -c $(TEST) $(TESTARGS)
-.PHONY: build test testacc testrace cover vet fmt fmtcheck errcheck vendor-status test-compile
+.PHONY: build test testacc vet fmt fmtcheck errcheck vendor-status test-compile
+
diff --git a/README.md b/README.md
index 1c669794078e..8fd6c924cfc7 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@ Terraform Provider
- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
-![Terraform](https://rawgithub.com/hashicorp/terraform/master/website/source/assets/images/logo-hashicorp.svg)
+![Terraform](https://rawgit.com/hashicorp/terraform-website/master/source/assets/images/logo-hashicorp.svg)
Requirements
------------
@@ -16,17 +16,17 @@ Requirements
Building The Provider
---------------------
-Clone repository to: `$GOPATH/src/github.com/hashicorp/terraform-provider-$PROVIDER_NAME`
+Clone repository to: `$GOPATH/src/github.com/terraform-providers/terraform-provider-$PROVIDER_NAME`
```sh
-$ mkdir -p $GOPATH/src/github.com/hashicorp; cd $GOPATH/src/github.com/hashicorp
+$ mkdir -p $GOPATH/src/github.com/terraform-providers; cd $GOPATH/src/github.com/terraform-providers
$ git clone git@github.com:hashicorp/terraform-provider-$PROVIDER_NAME
```
Enter the provider directory and build the provider
```sh
-$ cd $GOPATH/src/github.com/hashicorp/terraform-provider-$PROVIDER_NAME
+$ cd $GOPATH/src/github.com/terraform-providers/terraform-provider-$PROVIDER_NAME
$ make build
```
@@ -42,7 +42,7 @@ If you wish to work on the provider, you'll first need [Go](http://www.golang.or
To compile the provider, run `make build`. This will build the provider and put the provider binary in the `$GOPATH/bin` directory.
```sh
-$ make bin
+$ make build
...
$ $GOPATH/bin/terraform-provider-$PROVIDER_NAME
...
diff --git a/azurerm/config.go b/azurerm/config.go
index 8fdb95d19a7a..f588dd99b2d7 100644
--- a/azurerm/config.go
+++ b/azurerm/config.go
@@ -7,11 +7,14 @@ import (
"net/http"
"net/http/httputil"
+ "github.com/Azure/azure-sdk-for-go/arm/appinsights"
"github.com/Azure/azure-sdk-for-go/arm/cdn"
"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
"github.com/Azure/azure-sdk-for-go/arm/containerservice"
"github.com/Azure/azure-sdk-for-go/arm/disk"
+ "github.com/Azure/azure-sdk-for-go/arm/dns"
+ "github.com/Azure/azure-sdk-for-go/arm/documentdb"
"github.com/Azure/azure-sdk-for-go/arm/eventhub"
"github.com/Azure/azure-sdk-for-go/arm/keyvault"
"github.com/Azure/azure-sdk-for-go/arm/network"
@@ -50,7 +53,8 @@ type ArmClient struct {
vmImageClient compute.VirtualMachineImagesClient
vmClient compute.VirtualMachinesClient
- diskClient disk.DisksClient
+ diskClient disk.DisksClient
+ documentDBClient documentdb.DatabaseAccountsClient
appGatewayClient network.ApplicationGatewaysClient
ifaceClient network.InterfacesClient
@@ -68,6 +72,7 @@ type ArmClient struct {
vnetPeeringsClient network.VirtualNetworkPeeringsClient
routeTablesClient network.RouteTablesClient
routesClient network.RoutesClient
+ dnsClient dns.RecordSetsClient
cdnProfilesClient cdn.ProfilesClient
cdnEndpointsClient cdn.EndpointsClient
@@ -98,12 +103,15 @@ type ArmClient struct {
trafficManagerEndpointsClient trafficmanager.EndpointsClient
serviceBusNamespacesClient servicebus.NamespacesClient
+ serviceBusQueuesClient servicebus.QueuesClient
serviceBusTopicsClient servicebus.TopicsClient
serviceBusSubscriptionsClient servicebus.SubscriptionsClient
keyVaultClient keyvault.VaultsClient
sqlElasticPoolsClient sql.ElasticPoolsClient
+
+ appInsightsClient appinsights.ComponentsClient
}
func withRequestLogging() autorest.SendDecorator {
@@ -254,6 +262,12 @@ func (c *Config) getArmClient() (*ArmClient, error) {
csc.Sender = autorest.CreateSender(withRequestLogging())
client.containerServicesClient = csc
+ ddb := documentdb.NewDatabaseAccountsClientWithBaseURI(endpoint, c.SubscriptionID)
+ setUserAgent(&ddb.Client)
+ ddb.Authorizer = auth
+ ddb.Sender = autorest.CreateSender(withRequestLogging())
+ client.documentDBClient = ddb
+
dkc := disk.NewDisksClientWithBaseURI(endpoint, c.SubscriptionID)
setUserAgent(&dkc.Client)
dkc.Authorizer = auth
@@ -362,6 +376,12 @@ func (c *Config) getArmClient() (*ArmClient, error) {
rc.Sender = autorest.CreateSender(withRequestLogging())
client.routesClient = rc
+ dn := dns.NewRecordSetsClientWithBaseURI(endpoint, c.SubscriptionID)
+ setUserAgent(&dn.Client)
+ dn.Authorizer = auth
+ dn.Sender = autorest.CreateSender(withRequestLogging())
+ client.dnsClient = dn
+
rgc := resources.NewGroupsClientWithBaseURI(endpoint, c.SubscriptionID)
setUserAgent(&rgc.Client)
rgc.Authorizer = auth
@@ -452,6 +472,12 @@ func (c *Config) getArmClient() (*ArmClient, error) {
sbnc.Sender = autorest.CreateSender(withRequestLogging())
client.serviceBusNamespacesClient = sbnc
+ sbqc := servicebus.NewQueuesClientWithBaseURI(endpoint, c.SubscriptionID)
+ setUserAgent(&sbqc.Client)
+ sbqc.Authorizer = auth
+ sbqc.Sender = autorest.CreateSender(withRequestLogging())
+ client.serviceBusQueuesClient = sbqc
+
sbtc := servicebus.NewTopicsClientWithBaseURI(endpoint, c.SubscriptionID)
setUserAgent(&sbtc.Client)
sbtc.Authorizer = auth
@@ -476,6 +502,12 @@ func (c *Config) getArmClient() (*ArmClient, error) {
sqlepc.Sender = autorest.CreateSender(withRequestLogging())
client.sqlElasticPoolsClient = sqlepc
+ ai := appinsights.NewComponentsClientWithBaseURI(endpoint, c.SubscriptionID)
+ setUserAgent(&ai.Client)
+ ai.Authorizer = auth
+ ai.Sender = autorest.CreateSender(withRequestLogging())
+ client.appInsightsClient = ai
+
return &client, nil
}
diff --git a/azurerm/data_source_managed_disk.go b/azurerm/data_source_managed_disk.go
new file mode 100644
index 000000000000..3034d6b8d65e
--- /dev/null
+++ b/azurerm/data_source_managed_disk.go
@@ -0,0 +1,82 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceArmManagedDisk() *schema.Resource {
+ return &schema.Resource{
+ Read: dataSourceArmManagedDiskRead,
+ Schema: map[string]*schema.Schema{
+
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "resource_group_name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ "storage_account_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "source_uri": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "source_resource_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "os_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "disk_size_gb": {
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+
+ "tags": tagsSchema(),
+ },
+ }
+}
+
+func dataSourceArmManagedDiskRead(d *schema.ResourceData, meta interface{}) error {
+ diskClient := meta.(*ArmClient).diskClient
+
+ resGroup := d.Get("resource_group_name").(string)
+ name := d.Get("name").(string)
+
+ resp, err := diskClient.Get(resGroup, name)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("[ERROR] Error making Read request on Azure Managed Disk %s (resource group %s): %s", name, resGroup, err)
+ }
+
+ d.SetId(*resp.ID)
+ if resp.Properties != nil {
+ flattenAzureRmManagedDiskProperties(d, resp.Properties)
+ }
+
+ if resp.CreationData != nil {
+ flattenAzureRmManagedDiskCreationData(d, resp.CreationData)
+ }
+
+ flattenAndSetTags(d, resp.Tags)
+
+ return nil
+}
diff --git a/azurerm/data_source_managed_disk_test.go b/azurerm/data_source_managed_disk_test.go
new file mode 100644
index 000000000000..0237fcc5526f
--- /dev/null
+++ b/azurerm/data_source_managed_disk_test.go
@@ -0,0 +1,64 @@
+package azurerm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccDataSourceAzureRMManagedDisk_basic(t *testing.T) {
+ ri := acctest.RandInt()
+
+ name := fmt.Sprintf("acctestmanageddisk-%d", ri)
+ resourceGroupName := fmt.Sprintf("acctestRG-%d", ri)
+
+ config := testAccDatSourceAzureRMManagedDiskBasic(name, resourceGroupName)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMPublicIpDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("data.azurerm_managed_disk.test", "name", name),
+ resource.TestCheckResourceAttr("data.azurerm_managed_disk.test", "resource_group_name", resourceGroupName),
+ resource.TestCheckResourceAttr("data.azurerm_managed_disk.test", "storage_account_type", "Premium_LRS"),
+ resource.TestCheckResourceAttr("data.azurerm_managed_disk.test", "disk_size_gb", "10"),
+ resource.TestCheckResourceAttr("data.azurerm_managed_disk.test", "tags.%", "1"),
+ resource.TestCheckResourceAttr("data.azurerm_managed_disk.test", "tags.environment", "acctest"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccDatSourceAzureRMManagedDiskBasic(name string, resourceGroupName string) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "%s"
+ location = "West US"
+}
+
+resource "azurerm_managed_disk" "test" {
+ name = "%s"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ storage_account_type = "Premium_LRS"
+ create_option = "Empty"
+ disk_size_gb = "10"
+
+ tags {
+ environment = "acctest"
+ }
+}
+
+data "azurerm_managed_disk" "test" {
+ name = "${azurerm_managed_disk.test.name}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+`, resourceGroupName, name)
+}
diff --git a/azurerm/import_arm_application_insights_test.go b/azurerm/import_arm_application_insights_test.go
new file mode 100644
index 000000000000..21e70686ed2a
--- /dev/null
+++ b/azurerm/import_arm_application_insights_test.go
@@ -0,0 +1,56 @@
+package azurerm
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAzureRMApplicationInsights_importBasicWeb(t *testing.T) {
+ resourceName := "azurerm_application_insights.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMApplicationInsights_basicWeb(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMApplicationInsightsDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMApplicationInsights_importBasicOther(t *testing.T) {
+ resourceName := "azurerm_application_insights.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMApplicationInsights_basicWeb(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMApplicationInsightsDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
diff --git a/azurerm/import_arm_cdn_endpoint_test.go b/azurerm/import_arm_cdn_endpoint_test.go
index ee5c8a4bdcc8..3414c9a6272e 100644
--- a/azurerm/import_arm_cdn_endpoint_test.go
+++ b/azurerm/import_arm_cdn_endpoint_test.go
@@ -1,7 +1,6 @@
package azurerm
import (
- "fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@@ -12,7 +11,7 @@ func TestAccAzureRMCdnEndpoint_importWithTags(t *testing.T) {
resourceName := "azurerm_cdn_endpoint.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMCdnEndpoint_withTags, ri, ri, ri)
+ config := testAccAzureRMCdnEndpoint_withTags(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
diff --git a/azurerm/import_arm_cdn_profile_test.go b/azurerm/import_arm_cdn_profile_test.go
index 1db618a9e60e..3300f49cff99 100644
--- a/azurerm/import_arm_cdn_profile_test.go
+++ b/azurerm/import_arm_cdn_profile_test.go
@@ -1,7 +1,6 @@
package azurerm
import (
- "fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@@ -12,7 +11,7 @@ func TestAccAzureRMCdnProfile_importWithTags(t *testing.T) {
resourceName := "azurerm_cdn_profile.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMCdnProfile_withTags, ri, ri)
+ config := testAccAzureRMCdnProfile_withTags(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
diff --git a/azurerm/import_arm_cosmosdb_account_test.go b/azurerm/import_arm_cosmosdb_account_test.go
new file mode 100644
index 000000000000..593ff18319f9
--- /dev/null
+++ b/azurerm/import_arm_cosmosdb_account_test.go
@@ -0,0 +1,152 @@
+package azurerm
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAzureRMCosmosDBAccount_importBoundedStaleness(t *testing.T) {
+ resourceName := "azurerm_cosmosdb_account.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_boundedStaleness(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMCosmosDBAccount_importBoundedStalenessComplete(t *testing.T) {
+ resourceName := "azurerm_cosmosdb_account.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_boundedStalenessComplete(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMCosmosDBAccount_importEventualConsistency(t *testing.T) {
+ resourceName := "azurerm_cosmosdb_account.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_eventualConsistency(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMCosmosDBAccount_importSession(t *testing.T) {
+ resourceName := "azurerm_cosmosdb_account.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_session(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMCosmosDBAccount_importStrong(t *testing.T) {
+ resourceName := "azurerm_cosmosdb_account.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_strong(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMCosmosDBAccount_importGeoReplicated(t *testing.T) {
+ resourceName := "azurerm_cosmosdb_account.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_geoReplicated(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
diff --git a/azurerm/import_arm_dns_ptr_record_test.go b/azurerm/import_arm_dns_ptr_record_test.go
new file mode 100644
index 000000000000..d29353cbec16
--- /dev/null
+++ b/azurerm/import_arm_dns_ptr_record_test.go
@@ -0,0 +1,32 @@
+package azurerm
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAzureRMDnsPtrRecord_importBasic(t *testing.T) {
+ resourceName := "azurerm_dns_ptr_record.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMDnsPtrRecord_basic(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMDnsPtrRecordDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: config,
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
diff --git a/azurerm/import_arm_network_interface_card_test.go b/azurerm/import_arm_network_interface_card_test.go
new file mode 100644
index 000000000000..459b2e96a40d
--- /dev/null
+++ b/azurerm/import_arm_network_interface_card_test.go
@@ -0,0 +1,118 @@
+package azurerm
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAzureRMNetworkInterface_importBasic(t *testing.T) {
+ resourceName := "azurerm_network_interface.test"
+ rInt := acctest.RandInt()
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMNetworkInterface_basic(rInt),
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMNetworkInterface_importIPForwarding(t *testing.T) {
+ resourceName := "azurerm_network_interface.test"
+ rInt := acctest.RandInt()
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMNetworkInterface_ipForwarding(rInt),
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMNetworkInterface_importWithTags(t *testing.T) {
+ resourceName := "azurerm_network_interface.test"
+ rInt := acctest.RandInt()
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMNetworkInterface_withTags(rInt),
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMNetworkInterface_importMultipleLoadBalancers(t *testing.T) {
+ resourceName := "azurerm_network_interface.test1"
+ rInt := acctest.RandInt()
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMNetworkInterface_multipleLoadBalancers(rInt),
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMNetworkInterface_importPublicIP(t *testing.T) {
+ resourceName := "azurerm_network_interface.test"
+ rInt := acctest.RandInt()
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAzureRMNetworkInterface_publicIP(rInt),
+ },
+
+ resource.TestStep{
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
diff --git a/azurerm/import_arm_servicebus_namespace_test.go b/azurerm/import_arm_servicebus_namespace_test.go
index 2fa623ed3c3f..e1ad9834b8c6 100644
--- a/azurerm/import_arm_servicebus_namespace_test.go
+++ b/azurerm/import_arm_servicebus_namespace_test.go
@@ -1,7 +1,6 @@
package azurerm
import (
- "fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@@ -12,18 +11,18 @@ func TestAccAzureRMServiceBusNamespace_importBasic(t *testing.T) {
resourceName := "azurerm_servicebus_namespace.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri)
+ config := testAccAzureRMServiceBusNamespace_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
},
- resource.TestStep{
+ {
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
diff --git a/azurerm/import_arm_servicebus_queue_test.go b/azurerm/import_arm_servicebus_queue_test.go
new file mode 100644
index 000000000000..03d943e7d894
--- /dev/null
+++ b/azurerm/import_arm_servicebus_queue_test.go
@@ -0,0 +1,31 @@
+package azurerm
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccAzureRMServiceBusQueue_importBasic(t *testing.T) {
+ resourceName := "azurerm_servicebus_queue.test"
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMServiceBusQueue_basic(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusQueueDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
diff --git a/azurerm/import_arm_servicebus_topic_test.go b/azurerm/import_arm_servicebus_topic_test.go
index b5a933c65153..58ab5deb351e 100644
--- a/azurerm/import_arm_servicebus_topic_test.go
+++ b/azurerm/import_arm_servicebus_topic_test.go
@@ -1,7 +1,6 @@
package azurerm
import (
- "fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@@ -12,18 +11,40 @@ func TestAccAzureRMServiceBusTopic_importBasic(t *testing.T) {
resourceName := "azurerm_servicebus_topic.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri)
+ config := testAccAzureRMServiceBusTopic_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
},
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusTopic_importBasicDisabled(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
- resource.TestStep{
+ ri := acctest.RandInt()
+ config := testAccAzureRMServiceBusTopic_basicDisabled(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ },
+ {
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
diff --git a/azurerm/import_arm_storage_account_test.go b/azurerm/import_arm_storage_account_test.go
index 89a4e9599844..16954bb85030 100644
--- a/azurerm/import_arm_storage_account_test.go
+++ b/azurerm/import_arm_storage_account_test.go
@@ -3,8 +3,6 @@ package azurerm
import (
"testing"
- "fmt"
-
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
@@ -14,18 +12,18 @@ func TestAccAzureRMStorageAccount_importBasic(t *testing.T) {
ri := acctest.RandInt()
rs := acctest.RandString(4)
- config := fmt.Sprintf(testAccAzureRMStorageAccount_basic, ri, rs)
+ config := testAccAzureRMStorageAccount_basic(ri, rs)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMStorageAccountDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
},
- resource.TestStep{
+ {
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
diff --git a/azurerm/locks.go b/azurerm/locks.go
index ee6774af65a9..c07bd083aa52 100644
--- a/azurerm/locks.go
+++ b/azurerm/locks.go
@@ -1,12 +1,24 @@
package azurerm
-func azureRMUnlockMultiple(names *[]string) {
+// handle the case of using the same name for different kinds of resources
+func azureRMLockByName(name string, resourceType string) {
+ updatedName := resourceType + "." + name
+ armMutexKV.Lock(updatedName)
+}
+
+func azureRMLockMultipleByName(names *[]string, resourceType string) {
for _, name := range *names {
- armMutexKV.Unlock(name)
+ azureRMLockByName(name, resourceType)
}
}
-func azureRMLockMultiple(names *[]string) {
+
+func azureRMUnlockByName(name string, resourceType string) {
+ updatedName := resourceType + "." + name
+ armMutexKV.Unlock(updatedName)
+}
+
+func azureRMUnlockMultipleByName(names *[]string, resourceType string) {
for _, name := range *names {
- armMutexKV.Lock(name)
+ azureRMUnlockByName(name, resourceType)
}
}
diff --git a/azurerm/provider.go b/azurerm/provider.go
index 05c10505cfd9..88d3839bd14f 100644
--- a/azurerm/provider.go
+++ b/azurerm/provider.go
@@ -65,6 +65,7 @@ func Provider() terraform.ResourceProvider {
"azurerm_client_config": dataSourceArmClientConfig(),
"azurerm_resource_group": dataSourceArmResourceGroup(),
"azurerm_public_ip": dataSourceArmPublicIP(),
+ "azurerm_managed_disk": dataSourceArmManagedDisk(),
},
ResourcesMap: map[string]*schema.Resource{
@@ -74,6 +75,8 @@ func Provider() terraform.ResourceProvider {
"azurerm_cdn_profile": resourceArmCdnProfile(),
"azurerm_container_registry": resourceArmContainerRegistry(),
"azurerm_container_service": resourceArmContainerService(),
+ "azurerm_cosmosdb_account": resourceArmCosmosDBAccount(),
+ "azurerm_dns_ptr_record": resourceArmDnsPtrRecord(),
"azurerm_eventhub": resourceArmEventHub(),
"azurerm_eventhub_authorization_rule": resourceArmEventHubAuthorizationRule(),
@@ -101,6 +104,7 @@ func Provider() terraform.ResourceProvider {
"azurerm_route": resourceArmRoute(),
"azurerm_route_table": resourceArmRouteTable(),
"azurerm_servicebus_namespace": resourceArmServiceBusNamespace(),
+ "azurerm_servicebus_queue": resourceArmServiceBusQueue(),
"azurerm_servicebus_subscription": resourceArmServiceBusSubscription(),
"azurerm_servicebus_topic": resourceArmServiceBusTopic(),
"azurerm_sql_elasticpool": resourceArmSqlElasticPool(),
@@ -120,6 +124,8 @@ func Provider() terraform.ResourceProvider {
"azurerm_virtual_network": resourceArmVirtualNetwork(),
"azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(),
+ "azurerm_application_insights": resourceArmApplicationInsights(),
+
// These resources use the Riviera SDK
"azurerm_dns_a_record": resourceArmDnsARecord(),
"azurerm_dns_aaaa_record": resourceArmDnsAAAARecord(),
diff --git a/azurerm/resource_arm_application_insights.go b/azurerm/resource_arm_application_insights.go
new file mode 100644
index 000000000000..8218cdb942e5
--- /dev/null
+++ b/azurerm/resource_arm_application_insights.go
@@ -0,0 +1,164 @@
+package azurerm
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/arm/appinsights"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/helper/validation"
+)
+
+func resourceArmApplicationInsights() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmApplicationInsightsCreateOrUpdate,
+ Read: resourceArmApplicationInsightsRead,
+ Update: resourceArmApplicationInsightsCreateOrUpdate,
+ Delete: resourceArmApplicationInsightsDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "resource_group_name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "location": locationSchema(),
+
+ "application_type": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ DiffSuppressFunc: ignoreCaseDiffSuppressFunc,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(appinsights.Web),
+ string(appinsights.Other),
+ }, true),
+ },
+
+ "tags": tagsSchema(),
+
+ "app_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "instrumentation_key": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceArmApplicationInsightsCreateOrUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).appInsightsClient
+
+ log.Printf("[INFO] preparing arguments for AzureRM Application Insights creation.")
+
+ name := d.Get("name").(string)
+ resGroup := d.Get("resource_group_name").(string)
+ applicationType := d.Get("application_type").(string)
+ location := d.Get("location").(string)
+ tags := d.Get("tags").(map[string]interface{})
+
+ applicationInsightsComponentProperties := appinsights.ApplicationInsightsComponentProperties{
+ ApplicationID: &name,
+ ApplicationType: appinsights.ApplicationType(applicationType),
+ }
+
+ insightProperties := appinsights.ApplicationInsightsComponent{
+ Name: &name,
+ Location: &location,
+ Kind: &applicationType,
+ ApplicationInsightsComponentProperties: &applicationInsightsComponentProperties,
+ Tags: expandTags(tags),
+ }
+
+ _, err := client.CreateOrUpdate(resGroup, name, insightProperties)
+ if err != nil {
+ return err
+ }
+
+ read, err := client.Get(resGroup, name)
+ if err != nil {
+ return err
+ }
+ if read.ID == nil {
+ return fmt.Errorf("Cannot read AzureRM Application Insights '%s' (Resource Group %s) ID", name, resGroup)
+ }
+
+ d.SetId(*read.ID)
+
+ return resourceArmApplicationInsightsRead(d, meta)
+}
+
+func resourceArmApplicationInsightsRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).appInsightsClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+
+ log.Printf("[DEBUG] Reading AzureRM Application Insights '%s'", id)
+
+ resGroup := id.ResourceGroup
+ name := id.Path["components"]
+
+ resp, err := client.Get(resGroup, name)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error making Read request on AzureRM Application Insights '%s': %+v", name, err)
+ }
+
+ d.Set("name", name)
+ d.Set("resource_group_name", resGroup)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
+
+ if props := resp.ApplicationInsightsComponentProperties; props != nil {
+ d.Set("application_type", string(props.ApplicationType))
+ d.Set("app_id", props.AppID)
+ d.Set("instrumentation_key", props.InstrumentationKey)
+ }
+
+ flattenAndSetTags(d, resp.Tags)
+
+ return nil
+}
+
+func resourceArmApplicationInsightsDelete(d *schema.ResourceData, meta interface{}) error {
+ AppInsightsClient := meta.(*ArmClient).appInsightsClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["components"]
+
+ log.Printf("[DEBUG] Deleting AzureRM Application Insights '%s' (resource group '%s')", name, resGroup)
+
+ resp, err := AppInsightsClient.Delete(resGroup, name)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ return nil
+ }
+ return fmt.Errorf("Error issuing AzureRM delete request for Application Insights '%s': %+v", name, err)
+ }
+
+ return err
+}
diff --git a/azurerm/resource_arm_application_insights_test.go b/azurerm/resource_arm_application_insights_test.go
new file mode 100644
index 000000000000..1ee45ea22404
--- /dev/null
+++ b/azurerm/resource_arm_application_insights_test.go
@@ -0,0 +1,137 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureRMApplicationInsights_basicWeb(t *testing.T) {
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMApplicationInsights_basicWeb(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMApplicationInsightsDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMApplicationInsightsExists("azurerm_application_insights.test"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMApplicationInsights_basicOther(t *testing.T) {
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMApplicationInsights_basicOther(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMApplicationInsightsDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMApplicationInsightsExists("azurerm_application_insights.test"),
+ ),
+ },
+ },
+ })
+}
+
+func testCheckAzureRMApplicationInsightsDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*ArmClient).appInsightsClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_application_insights" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := conn.Get(resourceGroup, name)
+
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("Application Insights still exists:\n%#v", resp.ApplicationInsightsComponentProperties)
+ }
+ }
+
+ return nil
+}
+
+func testCheckAzureRMApplicationInsightsExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for App Insights: %s", name)
+ }
+
+ conn := testAccProvider.Meta().(*ArmClient).appInsightsClient
+
+ resp, err := conn.Get(resourceGroup, name)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on appInsightsClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Bad: Application Insights %q (resource group: %q) does not exist", name, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+func testAccAzureRMApplicationInsights_basicWeb(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West Europe"
+}
+
+resource "azurerm_application_insights" "test" {
+ name = "acctestappinsights-%d"
+ location = "West Europe"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ application_type = "web"
+}
+`, rInt, rInt)
+}
+
+func testAccAzureRMApplicationInsights_basicOther(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West Europe"
+}
+
+resource "azurerm_application_insights" "test" {
+ name = "acctestappinsights-%d"
+ location = "West Europe"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ application_type = "other"
+}
+`, rInt, rInt)
+}
diff --git a/azurerm/resource_arm_availability_set.go b/azurerm/resource_arm_availability_set.go
index a51ff04465b7..8c59d7fe4055 100644
--- a/azurerm/resource_arm_availability_set.go
+++ b/azurerm/resource_arm_availability_set.go
@@ -143,7 +143,7 @@ func resourceArmAvailabilitySetRead(d *schema.ResourceData, meta interface{}) er
d.Set("platform_update_domain_count", availSet.PlatformUpdateDomainCount)
d.Set("platform_fault_domain_count", availSet.PlatformFaultDomainCount)
d.Set("name", resp.Name)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
if resp.Sku != nil && resp.Sku.Name != nil {
d.Set("managed", strings.EqualFold(*resp.Sku.Name, "Aligned"))
diff --git a/azurerm/resource_arm_cdn_endpoint.go b/azurerm/resource_arm_cdn_endpoint.go
index a06a24e60755..358da7ed3c26 100644
--- a/azurerm/resource_arm_cdn_endpoint.go
+++ b/azurerm/resource_arm_cdn_endpoint.go
@@ -260,10 +260,6 @@ func resourceArmCdnEndpointRead(d *schema.ResourceData, meta interface{}) error
func resourceArmCdnEndpointUpdate(d *schema.ResourceData, meta interface{}) error {
cdnEndpointsClient := meta.(*ArmClient).cdnEndpointsClient
- if !d.HasChange("tags") {
- return nil
- }
-
name := d.Get("name").(string)
resGroup := d.Get("resource_group_name").(string)
profileName := d.Get("profile_name").(string)
diff --git a/azurerm/resource_arm_cdn_endpoint_test.go b/azurerm/resource_arm_cdn_endpoint_test.go
index 1c4928079b1c..e643172035c1 100644
--- a/azurerm/resource_arm_cdn_endpoint_test.go
+++ b/azurerm/resource_arm_cdn_endpoint_test.go
@@ -11,8 +11,9 @@ import (
)
func TestAccAzureRMCdnEndpoint_basic(t *testing.T) {
+ resourceName := "azurerm_cdn_endpoint.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMCdnEndpoint_basic, ri, ri, ri)
+ config := testAccAzureRMCdnEndpoint_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -22,7 +23,7 @@ func TestAccAzureRMCdnEndpoint_basic(t *testing.T) {
{
Config: config,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"),
+ testCheckAzureRMCdnEndpointExists(resourceName),
),
},
},
@@ -30,8 +31,9 @@ func TestAccAzureRMCdnEndpoint_basic(t *testing.T) {
}
func TestAccAzureRMCdnEndpoint_disappears(t *testing.T) {
+ resourceName := "azurerm_cdn_endpoint.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMCdnEndpoint_basic, ri, ri, ri)
+ config := testAccAzureRMCdnEndpoint_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -41,8 +43,8 @@ func TestAccAzureRMCdnEndpoint_disappears(t *testing.T) {
{
Config: config,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"),
- testCheckAzureRMCdnEndpointDisappears("azurerm_cdn_endpoint.test"),
+ testCheckAzureRMCdnEndpointExists(resourceName),
+ testCheckAzureRMCdnEndpointDisappears(resourceName),
),
ExpectNonEmptyPlan: true,
},
@@ -50,10 +52,40 @@ func TestAccAzureRMCdnEndpoint_disappears(t *testing.T) {
})
}
+func TestAccAzureRMCdnEndpoint_updateHostHeader(t *testing.T) {
+ resourceName := "azurerm_cdn_endpoint.test"
+ ri := acctest.RandInt()
+ config := testAccAzureRMCdnEndpoint_hostHeader(ri, "www.example.com")
+ updatedConfig := testAccAzureRMCdnEndpoint_hostHeader(ri, "www.example2.com")
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCdnEndpointDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCdnEndpointExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "origin_host_header", "www.example.com"),
+ ),
+ },
+ {
+ Config: updatedConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCdnEndpointExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "origin_host_header", "www.example2.com"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAzureRMCdnEndpoint_withTags(t *testing.T) {
+ resourceName := "azurerm_cdn_endpoint.test"
ri := acctest.RandInt()
- preConfig := fmt.Sprintf(testAccAzureRMCdnEndpoint_withTags, ri, ri, ri)
- postConfig := fmt.Sprintf(testAccAzureRMCdnEndpoint_withTagsUpdate, ri, ri, ri)
+ preConfig := testAccAzureRMCdnEndpoint_withTags(ri)
+ postConfig := testAccAzureRMCdnEndpoint_withTagsUpdate(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -63,24 +95,19 @@ func TestAccAzureRMCdnEndpoint_withTags(t *testing.T) {
{
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"),
- resource.TestCheckResourceAttr(
- "azurerm_cdn_endpoint.test", "tags.%", "2"),
- resource.TestCheckResourceAttr(
- "azurerm_cdn_endpoint.test", "tags.environment", "Production"),
- resource.TestCheckResourceAttr(
- "azurerm_cdn_endpoint.test", "tags.cost_center", "MSFT"),
+ testCheckAzureRMCdnEndpointExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "tags.%", "2"),
+ resource.TestCheckResourceAttr(resourceName, "tags.environment", "Production"),
+ resource.TestCheckResourceAttr(resourceName, "tags.cost_center", "MSFT"),
),
},
{
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMCdnEndpointExists("azurerm_cdn_endpoint.test"),
- resource.TestCheckResourceAttr(
- "azurerm_cdn_endpoint.test", "tags.%", "1"),
- resource.TestCheckResourceAttr(
- "azurerm_cdn_endpoint.test", "tags.environment", "staging"),
+ testCheckAzureRMCdnEndpointExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "tags.%", "1"),
+ resource.TestCheckResourceAttr(resourceName, "tags.environment", "staging"),
),
},
},
@@ -169,7 +196,8 @@ func testCheckAzureRMCdnEndpointDestroy(s *terraform.State) error {
return nil
}
-var testAccAzureRMCdnEndpoint_basic = `
+func testAccAzureRMCdnEndpoint_basic(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -194,9 +222,46 @@ resource "azurerm_cdn_endpoint" "test" {
http_port = 80
}
}
-`
+`, rInt, rInt, rInt)
+}
+
+func testAccAzureRMCdnEndpoint_hostHeader(rInt int, domain string) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cdn_profile" "test" {
+ name = "acctestcdnprof%d"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ sku = "Standard_Verizon"
+}
+
+resource "azurerm_cdn_endpoint" "test" {
+ name = "acctestcdnend%d"
+ profile_name = "${azurerm_cdn_profile.test.name}"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ origin_host_header = "%s"
+
+ origin {
+ name = "acceptanceTestCdnOrigin2"
+ host_name = "www.example.com"
+ https_port = 443
+ http_port = 80
+ }
+
+ tags {
+ environment = "Production"
+ cost_center = "MSFT"
+ }
+}
+`, rInt, rInt, rInt, domain)
+}
-var testAccAzureRMCdnEndpoint_withTags = `
+func testAccAzureRMCdnEndpoint_withTags(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -226,9 +291,11 @@ resource "azurerm_cdn_endpoint" "test" {
cost_center = "MSFT"
}
}
-`
+`, rInt, rInt, rInt)
+}
-var testAccAzureRMCdnEndpoint_withTagsUpdate = `
+func testAccAzureRMCdnEndpoint_withTagsUpdate(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -257,4 +324,5 @@ resource "azurerm_cdn_endpoint" "test" {
environment = "staging"
}
}
-`
+`, rInt, rInt, rInt)
+}
diff --git a/azurerm/resource_arm_cdn_profile_test.go b/azurerm/resource_arm_cdn_profile_test.go
index fbbd5e8faaaf..0e80fcb2b626 100644
--- a/azurerm/resource_arm_cdn_profile_test.go
+++ b/azurerm/resource_arm_cdn_profile_test.go
@@ -51,9 +51,8 @@ func TestResourceAzureRMCdnProfileSKU_validation(t *testing.T) {
}
func TestAccAzureRMCdnProfile_basic(t *testing.T) {
-
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMCdnProfile_basic, ri, ri)
+ config := testAccAzureRMCdnProfile_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -73,8 +72,8 @@ func TestAccAzureRMCdnProfile_basic(t *testing.T) {
func TestAccAzureRMCdnProfile_withTags(t *testing.T) {
ri := acctest.RandInt()
- preConfig := fmt.Sprintf(testAccAzureRMCdnProfile_withTags, ri, ri)
- postConfig := fmt.Sprintf(testAccAzureRMCdnProfile_withTagsUpdate, ri, ri)
+ preConfig := testAccAzureRMCdnProfile_withTags(ri)
+ postConfig := testAccAzureRMCdnProfile_withTagsUpdate(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -109,7 +108,6 @@ func TestAccAzureRMCdnProfile_withTags(t *testing.T) {
}
func TestAccAzureRMCdnProfile_NonStandardCasing(t *testing.T) {
-
ri := acctest.RandInt()
config := testAccAzureRMCdnProfileNonStandardCasing(ri)
@@ -118,14 +116,13 @@ func TestAccAzureRMCdnProfile_NonStandardCasing(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMCdnProfileDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMCdnProfileExists("azurerm_cdn_profile.test"),
),
},
-
- resource.TestStep{
+ {
Config: config,
PlanOnly: true,
ExpectNonEmptyPlan: false,
@@ -188,7 +185,8 @@ func testCheckAzureRMCdnProfileDestroy(s *terraform.State) error {
return nil
}
-var testAccAzureRMCdnProfile_basic = `
+func testAccAzureRMCdnProfile_basic(ri int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -199,13 +197,16 @@ resource "azurerm_cdn_profile" "test" {
resource_group_name = "${azurerm_resource_group.test.name}"
sku = "Standard_Verizon"
}
-`
+`, ri, ri)
+}
-var testAccAzureRMCdnProfile_withTags = `
+func testAccAzureRMCdnProfile_withTags(ri int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
}
+
resource "azurerm_cdn_profile" "test" {
name = "acctestcdnprof%d"
location = "West US"
@@ -217,9 +218,10 @@ resource "azurerm_cdn_profile" "test" {
cost_center = "MSFT"
}
}
-`
-
-var testAccAzureRMCdnProfile_withTagsUpdate = `
+`, ri, ri)
+}
+func testAccAzureRMCdnProfile_withTagsUpdate(ri int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -234,7 +236,8 @@ resource "azurerm_cdn_profile" "test" {
environment = "staging"
}
}
-`
+`, ri, ri)
+}
func testAccAzureRMCdnProfileNonStandardCasing(ri int) string {
return fmt.Sprintf(`
diff --git a/azurerm/resource_arm_container_service.go b/azurerm/resource_arm_container_service.go
index da9db3580f68..54b7ed3baa3d 100644
--- a/azurerm/resource_arm_container_service.go
+++ b/azurerm/resource_arm_container_service.go
@@ -336,10 +336,11 @@ func flattenAzureRmContainerServiceMasterProfile(profile containerservice.Master
F: resourceAzureRMContainerServiceMasterProfileHash,
}
- masterProfile := make(map[string]interface{}, 2)
+ masterProfile := make(map[string]interface{}, 3)
masterProfile["count"] = int(*profile.Count)
masterProfile["dns_prefix"] = *profile.DNSPrefix
+ masterProfile["fqdn"] = *profile.Fqdn
masterProfiles.Add(masterProfile)
diff --git a/azurerm/resource_arm_container_service_test.go b/azurerm/resource_arm_container_service_test.go
index 36c1ff19fe83..d6597a3fe8f6 100644
--- a/azurerm/resource_arm_container_service_test.go
+++ b/azurerm/resource_arm_container_service_test.go
@@ -3,6 +3,7 @@ package azurerm
import (
"fmt"
"net/http"
+ "os"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@@ -96,7 +97,9 @@ func TestAccAzureRMContainerService_dcosBasic(t *testing.T) {
func TestAccAzureRMContainerService_kubernetesBasic(t *testing.T) {
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMContainerService_kubernetesBasic, ri, ri, ri, ri, ri)
+ clientId := os.Getenv("ARM_CLIENT_ID")
+ clientSecret := os.Getenv("ARM_CLIENT_SECRET")
+ config := testAccAzureRMContainerService_kubernetesBasic(ri, clientId, clientSecret)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -115,7 +118,9 @@ func TestAccAzureRMContainerService_kubernetesBasic(t *testing.T) {
func TestAccAzureRMContainerService_kubernetesComplete(t *testing.T) {
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMContainerService_kubernetesComplete, ri, ri, ri, ri, ri)
+ clientId := os.Getenv("ARM_CLIENT_ID")
+ clientSecret := os.Getenv("ARM_CLIENT_SECRET")
+ config := testAccAzureRMContainerService_kubernetesComplete(ri, clientId, clientSecret)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -189,7 +194,8 @@ resource "azurerm_container_service" "test" {
}
`
-var testAccAzureRMContainerService_kubernetesBasic = `
+func testAccAzureRMContainerService_kubernetesBasic(rInt int, clientId string, clientSecret string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "East US"
@@ -222,17 +228,19 @@ resource "azurerm_container_service" "test" {
}
service_principal {
- client_id = "00000000-0000-0000-0000-000000000000"
- client_secret = "00000000000000000000000000000000"
+ client_id = "%s"
+ client_secret = "%s"
}
diagnostics_profile {
enabled = false
}
}
-`
+`, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret)
+}
-var testAccAzureRMContainerService_kubernetesComplete = `
+func testAccAzureRMContainerService_kubernetesComplete(rInt int, clientId string, clientSecret string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "East US"
@@ -265,8 +273,8 @@ resource "azurerm_container_service" "test" {
}
service_principal {
- client_id = "00000000-0000-0000-0000-000000000000"
- client_secret = "00000000000000000000000000000000"
+ client_id = "%s"
+ client_secret = "%s"
}
diagnostics_profile {
@@ -277,7 +285,8 @@ resource "azurerm_container_service" "test" {
you = "me"
}
}
-`
+`, rInt, rInt, rInt, rInt, rInt, clientId, clientSecret)
+}
var testAccAzureRMContainerService_swarmBasic = `
resource "azurerm_resource_group" "test" {
diff --git a/azurerm/resource_arm_cosmos_db_account.go b/azurerm/resource_arm_cosmos_db_account.go
new file mode 100644
index 000000000000..75c46e6ffb8f
--- /dev/null
+++ b/azurerm/resource_arm_cosmos_db_account.go
@@ -0,0 +1,411 @@
+package azurerm
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "regexp"
+
+ "github.com/Azure/azure-sdk-for-go/arm/documentdb"
+ "github.com/hashicorp/terraform/helper/hashcode"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/helper/validation"
+)
+
+func resourceArmCosmosDBAccount() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmCosmosDBAccountCreateUpdate,
+ Read: resourceArmCosmosDBAccountRead,
+ Update: resourceArmCosmosDBAccountCreateUpdate,
+ Delete: resourceArmCosmosDBAccountDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validateAzureRmCosmosDBAccountName,
+ },
+
+ "location": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+
+ "resource_group_name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "offer_type": {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(documentdb.Standard),
+ }, true),
+ },
+
+ "ip_range_filter": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+
+ "consistency_policy": {
+ Type: schema.TypeSet,
+ Required: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "consistency_level": {
+ Type: schema.TypeString,
+ Required: true,
+ DiffSuppressFunc: ignoreCaseDiffSuppressFunc,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(documentdb.BoundedStaleness),
+ string(documentdb.Eventual),
+ string(documentdb.Session),
+ string(documentdb.Strong),
+ }, true),
+ },
+
+ "max_interval_in_seconds": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 5,
+ ValidateFunc: validation.IntBetween(1, 100),
+ },
+
+ "max_staleness_prefix": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 100,
+ ValidateFunc: validation.IntBetween(1, 2147483647),
+ },
+ },
+ },
+ Set: resourceAzureRMCosmosDBAccountConsistencyPolicyHash,
+ },
+
+ "failover_policy": {
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "location": {
+ Type: schema.TypeString,
+ Required: true,
+ StateFunc: azureRMNormalizeLocation,
+ },
+
+ "priority": {
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ },
+ },
+ Set: resourceAzureRMCosmosDBAccountFailoverPolicyHash,
+ },
+
+ "primary_master_key": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "secondary_master_key": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "primary_readonly_master_key": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "secondary_readonly_master_key": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "tags": tagsSchema(),
+ },
+ }
+}
+
+func resourceArmCosmosDBAccountCreateUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).documentDBClient
+ log.Printf("[INFO] preparing arguments for AzureRM Cosmos DB Account creation.")
+
+ name := d.Get("name").(string)
+ location := d.Get("location").(string)
+ resGroup := d.Get("resource_group_name").(string)
+ offerType := d.Get("offer_type").(string)
+ ipRangeFilter := d.Get("ip_range_filter").(string)
+
+ consistencyPolicy := expandAzureRmCosmosDBAccountConsistencyPolicy(d)
+ failoverPolicies, err := expandAzureRmCosmosDBAccountFailoverPolicies(name, d)
+ if err != nil {
+ return err
+ }
+ tags := d.Get("tags").(map[string]interface{})
+
+ parameters := documentdb.DatabaseAccountCreateUpdateParameters{
+ Location: &location,
+ DatabaseAccountCreateUpdateProperties: &documentdb.DatabaseAccountCreateUpdateProperties{
+ ConsistencyPolicy: &consistencyPolicy,
+ DatabaseAccountOfferType: &offerType,
+ Locations: &failoverPolicies,
+ IPRangeFilter: &ipRangeFilter,
+ },
+ Tags: expandTags(tags),
+ }
+
+ _, errChan := client.CreateOrUpdate(resGroup, name, parameters, make(chan struct{}))
+ err = <-errChan
+ if err != nil {
+ return err
+ }
+
+ read, err := client.Get(resGroup, name)
+ if err != nil {
+ return err
+ }
+
+ if read.ID == nil {
+ return fmt.Errorf("Cannot read CosmosDB Account '%s' (resource group %s) ID", name, resGroup)
+ }
+
+ d.SetId(*read.ID)
+
+ return resourceArmCosmosDBAccountRead(d, meta)
+}
+
+func resourceArmCosmosDBAccountRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).documentDBClient
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["databaseAccounts"]
+
+ resp, err := client.Get(resGroup, name)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+ return fmt.Errorf("Error making Read request on AzureRM CosmosDB Account '%s': %s", name, err)
+ }
+
+ d.Set("name", resp.Name)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
+ d.Set("resource_group_name", resGroup)
+ d.Set("offer_type", string(resp.DatabaseAccountOfferType))
+ d.Set("ip_range_filter", resp.IPRangeFilter)
+ flattenAndSetAzureRmCosmosDBAccountConsistencyPolicy(d, resp.ConsistencyPolicy)
+ flattenAndSetAzureRmCosmosDBAccountFailoverPolicy(d, resp.FailoverPolicies)
+
+ keys, err := client.ListKeys(resGroup, name)
+ if err != nil {
+ log.Printf("[ERROR] Unable to List Write keys for CosmosDB Account %s: %s", name, err)
+ } else {
+ d.Set("primary_master_key", keys.PrimaryMasterKey)
+ d.Set("secondary_master_key", keys.SecondaryMasterKey)
+ }
+
+ readonlyKeys, err := client.ListReadOnlyKeys(resGroup, name)
+ if err != nil {
+ log.Printf("[ERROR] Unable to List read-only keys for CosmosDB Account %s: %s", name, err)
+ } else {
+ d.Set("primary_readonly_master_key", readonlyKeys.PrimaryReadonlyMasterKey)
+ d.Set("secondary_readonly_master_key", readonlyKeys.SecondaryReadonlyMasterKey)
+ }
+
+ flattenAndSetTags(d, resp.Tags)
+
+ return nil
+}
+
+func resourceArmCosmosDBAccountDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).documentDBClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ name := id.Path["databaseAccounts"]
+
+ deleteResp, errChan := client.Delete(resGroup, name, make(chan struct{}))
+ resp := <-deleteResp
+ err = <-errChan
+
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ return nil
+ }
+
+ return fmt.Errorf("Error issuing AzureRM delete request for CosmosDB Account '%s': %+v", name, err)
+ }
+
+ return nil
+}
+
+func expandAzureRmCosmosDBAccountConsistencyPolicy(d *schema.ResourceData) documentdb.ConsistencyPolicy {
+ inputs := d.Get("consistency_policy").(*schema.Set).List()
+ input := inputs[0].(map[string]interface{})
+
+ consistencyLevel := input["consistency_level"].(string)
+
+ policy := documentdb.ConsistencyPolicy{
+ DefaultConsistencyLevel: documentdb.DefaultConsistencyLevel(consistencyLevel),
+ }
+
+ if stalenessPrefix := input["max_staleness_prefix"].(int); stalenessPrefix > 0 {
+ maxStalenessPrefix := int64(stalenessPrefix)
+ policy.MaxStalenessPrefix = &maxStalenessPrefix
+ }
+
+ if maxInterval := input["max_interval_in_seconds"].(int); maxInterval > 0 {
+ maxIntervalInSeconds := int32(maxInterval)
+ policy.MaxIntervalInSeconds = &maxIntervalInSeconds
+ }
+
+ return policy
+}
+
+func expandAzureRmCosmosDBAccountFailoverPolicies(databaseName string, d *schema.ResourceData) ([]documentdb.Location, error) {
+ input := d.Get("failover_policy").(*schema.Set).List()
+ locations := make([]documentdb.Location, 0, len(input))
+
+ for _, configRaw := range input {
+ data := configRaw.(map[string]interface{})
+
+ locationName := azureRMNormalizeLocation(data["location"].(string))
+ id := fmt.Sprintf("%s-%s", databaseName, locationName)
+ failoverPriority := int32(data["priority"].(int))
+
+ location := documentdb.Location{
+ ID: &id,
+ LocationName: &locationName,
+ FailoverPriority: &failoverPriority,
+ }
+
+ locations = append(locations, location)
+ }
+
+ containsWriteLocation := false
+ writeFailoverPriority := int32(0)
+ for _, location := range locations {
+ if *location.FailoverPriority == writeFailoverPriority {
+ containsWriteLocation = true
+ break
+ }
+ }
+
+ // all priorities must be unique
+ locationIds := make(map[int]struct{}, len(locations))
+ for _, location := range locations {
+ priority := int(*location.FailoverPriority)
+ if _, ok := locationIds[priority]; ok {
+ err := fmt.Errorf("Each Failover Policy needs a unique Priority")
+ return nil, err
+ }
+
+ locationIds[priority] = struct{}{}
+ }
+
+ if !containsWriteLocation {
+ err := fmt.Errorf("Failover Policy should contain a Write Location (Location '0')")
+ return nil, err
+ }
+
+ return locations, nil
+}
+
+func flattenAndSetAzureRmCosmosDBAccountConsistencyPolicy(d *schema.ResourceData, policy *documentdb.ConsistencyPolicy) {
+ results := schema.Set{
+ F: resourceAzureRMCosmosDBAccountConsistencyPolicyHash,
+ }
+
+ result := map[string]interface{}{}
+ result["consistency_level"] = string(policy.DefaultConsistencyLevel)
+ result["max_interval_in_seconds"] = int(*policy.MaxIntervalInSeconds)
+ result["max_staleness_prefix"] = int(*policy.MaxStalenessPrefix)
+ results.Add(result)
+
+ d.Set("consistency_policy", &results)
+}
+
+func flattenAndSetAzureRmCosmosDBAccountFailoverPolicy(d *schema.ResourceData, list *[]documentdb.FailoverPolicy) {
+ results := schema.Set{
+ F: resourceAzureRMCosmosDBAccountFailoverPolicyHash,
+ }
+
+ for _, i := range *list {
+ result := map[string]interface{}{
+ "id": *i.ID,
+ "location": azureRMNormalizeLocation(*i.LocationName),
+ "priority": int(*i.FailoverPriority),
+ }
+
+ results.Add(result)
+ }
+
+ d.Set("failover_policy", &results)
+}
+
+func resourceAzureRMCosmosDBAccountConsistencyPolicyHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+
+ consistencyLevel := m["consistency_level"].(string)
+ maxInterval := m["max_interval_in_seconds"].(int)
+ maxStalenessPrefix := m["max_staleness_prefix"].(int)
+
+ buf.WriteString(fmt.Sprintf("%s-%d-%d", consistencyLevel, maxInterval, maxStalenessPrefix))
+
+ return hashcode.String(buf.String())
+}
+
+func resourceAzureRMCosmosDBAccountFailoverPolicyHash(v interface{}) int {
+ var buf bytes.Buffer
+ m := v.(map[string]interface{})
+
+ locationName := m["location"].(string)
+ location := azureRMNormalizeLocation(locationName)
+ priority := int32(m["priority"].(int))
+
+ buf.WriteString(fmt.Sprintf("%s-%d", location, priority))
+
+ return hashcode.String(buf.String())
+}
+
+func validateAzureRmCosmosDBAccountName(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+
+ r := regexp.MustCompile("^[a-z0-9-]+$")
+ if !r.MatchString(value) {
+ errors = append(errors, fmt.Errorf("CosmosDB Account Name can only contain lower-case characters, numbers and the `-` character."))
+ }
+
+ length := len(value)
+ if length < 3 || length > 50 {
+ errors = append(errors, fmt.Errorf("CosmosDB Account Name can only be between 3 and 50 characters."))
+ }
+
+ return
+}
diff --git a/azurerm/resource_arm_cosmos_db_account_test.go b/azurerm/resource_arm_cosmos_db_account_test.go
new file mode 100644
index 000000000000..14ad1a893701
--- /dev/null
+++ b/azurerm/resource_arm_cosmos_db_account_test.go
@@ -0,0 +1,368 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// TestAccAzureRMCosmosDBAccountName_validation exercises the name validator
+// at the length boundaries (2, 3, 50 and 51 characters).
+func TestAccAzureRMCosmosDBAccountName_validation(t *testing.T) {
+ str := acctest.RandString(50)
+ cases := []struct {
+ Value string
+ ErrCount int
+ }{
+ {Value: "ab", ErrCount: 1},
+ {Value: "abc", ErrCount: 0},
+ {Value: str, ErrCount: 0},
+ {Value: str + "a", ErrCount: 1},
+ }
+
+ for _, tc := range cases {
+ _, errors := validateAzureRmCosmosDBAccountName(tc.Value, "azurerm_cosmosdb_account")
+
+ // report expected vs actual counts: the old message claimed an error
+ // was expected even for the ErrCount == 0 cases
+ if len(errors) != tc.ErrCount {
+ t.Fatalf("Expected %d validation error(s) for %q, got %d", tc.ErrCount, tc.Value, len(errors))
+ }
+ }
+}
+
+// Acceptance test: CosmosDB account with BoundedStaleness consistency and
+// defaulted staleness settings.
+func TestAccAzureRMCosmosDBAccount_boundedStaleness(t *testing.T) {
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_boundedStaleness(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCosmosDBAccountExists("azurerm_cosmosdb_account.test"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: BoundedStaleness consistency with explicit
+// max_interval_in_seconds and max_staleness_prefix values.
+func TestAccAzureRMCosmosDBAccount_boundedStalenessComplete(t *testing.T) {
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_boundedStalenessComplete(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCosmosDBAccountExists("azurerm_cosmosdb_account.test"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: CosmosDB account with Eventual consistency.
+func TestAccAzureRMCosmosDBAccount_eventualConsistency(t *testing.T) {
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_eventualConsistency(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCosmosDBAccountExists("azurerm_cosmosdb_account.test"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: CosmosDB account with Session consistency.
+func TestAccAzureRMCosmosDBAccount_session(t *testing.T) {
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_session(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCosmosDBAccountExists("azurerm_cosmosdb_account.test"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: CosmosDB account with Strong consistency.
+func TestAccAzureRMCosmosDBAccount_strong(t *testing.T) {
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_strong(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCosmosDBAccountExists("azurerm_cosmosdb_account.test"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: CosmosDB account with two failover_policy blocks
+// (geo-replicated across two regions).
+func TestAccAzureRMCosmosDBAccount_geoReplicated(t *testing.T) {
+
+ ri := acctest.RandInt()
+ config := testAccAzureRMCosmosDBAccount_geoReplicated(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMCosmosDBAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMCosmosDBAccountExists("azurerm_cosmosdb_account.test"),
+ ),
+ },
+ },
+ })
+}
+
+// testCheckAzureRMCosmosDBAccountDestroy verifies that no CosmosDB account
+// from the test state survives after destroy.
+func testCheckAzureRMCosmosDBAccountDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*ArmClient).documentDBClient
+
+ for _, rs := range s.RootModule().Resources {
+ // the type registered by the test configs is `azurerm_cosmosdb_account`;
+ // the previous filter on `azurerm_cosmos_db` never matched, so the
+ // destroy check silently verified nothing
+ if rs.Type != "azurerm_cosmosdb_account" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := conn.Get(resourceGroup, name)
+
+ // a Get error is taken to mean the account is already gone
+ if err != nil {
+ return nil
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("CosmosDB Account still exists:\n%#v", resp)
+ }
+ }
+
+ return nil
+}
+
+// testCheckAzureRMCosmosDBAccountExists returns a TestCheckFunc asserting
+// that the named CosmosDB account in state actually exists in Azure.
+func testCheckAzureRMCosmosDBAccountExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ accountName := rs.Primary.Attributes["name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for CosmosDB Account: '%s'", accountName)
+ }
+
+ client := testAccProvider.Meta().(*ArmClient).documentDBClient
+ resp, err := client.Get(resourceGroup, accountName)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on documentDBClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Bad: CosmosDB Account '%s' (resource group: '%s') does not exist", accountName, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+// HCL config: BoundedStaleness account with default staleness settings and a
+// single (primary) failover policy.
+func testAccAzureRMCosmosDBAccount_boundedStaleness(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cosmosdb_account" "test" {
+ name = "acctest-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+
+ consistency_policy {
+ consistency_level = "BoundedStaleness"
+ }
+
+ failover_policy {
+ location = "${azurerm_resource_group.test.location}"
+ priority = 0
+ }
+}
+`, rInt, rInt)
+}
+
+// HCL config: BoundedStaleness account with explicit staleness settings.
+func testAccAzureRMCosmosDBAccount_boundedStalenessComplete(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cosmosdb_account" "test" {
+ name = "acctest-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+
+ consistency_policy {
+ consistency_level = "BoundedStaleness"
+ max_interval_in_seconds = 10
+ max_staleness_prefix = 200
+ }
+
+ failover_policy {
+ location = "${azurerm_resource_group.test.location}"
+ priority = 0
+ }
+}
+`, rInt, rInt)
+}
+
+// HCL config: account with Eventual consistency.
+func testAccAzureRMCosmosDBAccount_eventualConsistency(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cosmosdb_account" "test" {
+ name = "acctest-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+
+ consistency_policy {
+ consistency_level = "Eventual"
+ }
+
+ failover_policy {
+ location = "${azurerm_resource_group.test.location}"
+ priority = 0
+ }
+}
+`, rInt, rInt)
+}
+
+// HCL config: account with Session consistency.
+func testAccAzureRMCosmosDBAccount_session(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cosmosdb_account" "test" {
+ name = "acctest-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+
+ consistency_policy {
+ consistency_level = "Session"
+ }
+
+ failover_policy {
+ location = "${azurerm_resource_group.test.location}"
+ priority = 0
+ }
+}
+`, rInt, rInt)
+}
+
+// HCL config: account with Strong consistency.
+func testAccAzureRMCosmosDBAccount_strong(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cosmosdb_account" "test" {
+ name = "acctest-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+
+ consistency_policy {
+ consistency_level = "Strong"
+ }
+
+ failover_policy {
+ location = "${azurerm_resource_group.test.location}"
+ priority = 0
+ }
+}
+`, rInt, rInt)
+}
+
+// HCL config: geo-replicated account — two failover_policy blocks with
+// priorities 0 (primary) and 1 (secondary region).
+func testAccAzureRMCosmosDBAccount_geoReplicated(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_cosmosdb_account" "test" {
+ name = "acctest-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+
+ consistency_policy {
+ consistency_level = "BoundedStaleness"
+ max_interval_in_seconds = 10
+ max_staleness_prefix = 200
+ }
+
+ failover_policy {
+ location = "${azurerm_resource_group.test.location}"
+ priority = 0
+ }
+
+ failover_policy {
+ location = "West Europe"
+ priority = 1
+ }
+}
+`, rInt, rInt)
+}
diff --git a/azurerm/resource_arm_dns_ptr_record.go b/azurerm/resource_arm_dns_ptr_record.go
new file mode 100644
index 000000000000..3ae7f0a3c4b9
--- /dev/null
+++ b/azurerm/resource_arm_dns_ptr_record.go
@@ -0,0 +1,183 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/arm/dns"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+// resourceArmDnsPtrRecord returns the schema for the azurerm_dns_ptr_record
+// resource. Create and Update share one function, and import is supported
+// via plain ID passthrough.
+func resourceArmDnsPtrRecord() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmDnsPtrRecordCreateOrUpdate,
+ Read: resourceArmDnsPtrRecordRead,
+ Update: resourceArmDnsPtrRecordCreateOrUpdate,
+ Delete: resourceArmDnsPtrRecordDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "resource_group_name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ // NOTE(review): zone_name is not ForceNew even though a record set
+ // cannot move between zones in-place — confirm this is intended
+ "zone_name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+
+ // target FQDNs of the PTR record set
+ "records": {
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
+
+ "ttl": {
+ Type: schema.TypeInt,
+ Required: true,
+ },
+
+ // etag is read back from the API; Computed, never user-set
+ "etag": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "tags": tagsSchema(),
+ },
+ }
+}
+
+// resourceArmDnsPtrRecordCreateOrUpdate creates or updates a DNS PTR record
+// set inside an existing DNS zone, then re-reads it into state.
+func resourceArmDnsPtrRecordCreateOrUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ dnsClient := client.dnsClient
+
+ name := d.Get("name").(string)
+ resGroup := d.Get("resource_group_name").(string)
+ zoneName := d.Get("zone_name").(string)
+ ttl := int64(d.Get("ttl").(int))
+ eTag := d.Get("etag").(string)
+
+ tags := d.Get("tags").(map[string]interface{})
+ metadata := expandTags(tags)
+
+ records, err := expandAzureRmDnsPtrRecords(d)
+ if err != nil {
+ // previously this error was silently discarded before being
+ // shadow-reassigned by the CreateOrUpdate call below
+ return err
+ }
+
+ props := dns.RecordSetProperties{
+ Metadata: metadata,
+ TTL: &ttl,
+ PtrRecords: &records,
+ }
+
+ parameters := dns.RecordSet{
+ Name: &name,
+ RecordSetProperties: &props,
+ }
+
+ //last parameter is set to empty to allow updates to records after creation
+ // (per SDK, set it to '*' to prevent updates, all other values are ignored)
+ resp, err := dnsClient.CreateOrUpdate(resGroup, zoneName, name, dns.PTR, parameters, eTag, "")
+ if err != nil {
+ return err
+ }
+
+ if resp.ID == nil {
+ return fmt.Errorf("Cannot read DNS PTR Record %s (resource group %s) ID", name, resGroup)
+ }
+
+ d.SetId(*resp.ID)
+
+ return resourceArmDnsPtrRecordRead(d, meta)
+}
+
+// resourceArmDnsPtrRecordRead refreshes state for a DNS PTR record set,
+// removing it from state if it has been deleted out-of-band.
+func resourceArmDnsPtrRecordRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ dnsClient := client.dnsClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+
+ resGroup := id.ResourceGroup
+ name := id.Path["PTR"]
+ zoneName := id.Path["dnszones"]
+
+ resp, err := dnsClient.Get(resGroup, zoneName, name, dns.PTR)
+ // the SDK surfaces 404s as errors, so the not-found check must run
+ // before the error return — the old ordering made it unreachable and
+ // turned out-of-band deletion into a refresh failure
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("Error reading DNS PTR record %s: %v", name, err)
+ }
+
+ d.Set("name", name)
+ d.Set("resource_group_name", resGroup)
+ d.Set("zone_name", zoneName)
+ d.Set("ttl", resp.TTL)
+ d.Set("etag", resp.Etag)
+
+ if err := d.Set("records", flattenAzureRmDnsPtrRecords(resp.PtrRecords)); err != nil {
+ return err
+ }
+ flattenAndSetTags(d, resp.Metadata)
+
+ return nil
+}
+
+// resourceArmDnsPtrRecordDelete deletes a DNS PTR record set.
+func resourceArmDnsPtrRecordDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient)
+ dnsClient := client.dnsClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+
+ resGroup := id.ResourceGroup
+ name := id.Path["PTR"]
+ zoneName := id.Path["dnszones"]
+
+ // renamed the result variable: the old code shadowed the predeclared
+ // `error` type, and ignored a non-nil error whenever the status was 200
+ resp, err := dnsClient.Delete(resGroup, zoneName, name, dns.PTR, "")
+ if err != nil || resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("Error deleting DNS PTR Record %s: %s", name, err)
+ }
+
+ return nil
+}
+
+// flattenAzureRmDnsPtrRecords converts the API's PTR record list into the
+// flat list of target FQDNs stored in state.
+func flattenAzureRmDnsPtrRecords(records *[]dns.PtrRecord) []string {
+ results := make([]string, 0)
+
+ // the nil check must precede any dereference: the old code called
+ // len(*records) before checking `records != nil`, panicking on nil input
+ if records != nil {
+ for _, record := range *records {
+ results = append(results, *record.Ptrdname)
+ }
+ }
+
+ return results
+}
+
+// expandAzureRmDnsPtrRecords builds the SDK PtrRecord list from the
+// `records` set in the resource configuration.
+func expandAzureRmDnsPtrRecords(d *schema.ResourceData) ([]dns.PtrRecord, error) {
+ rawRecords := d.Get("records").(*schema.Set).List()
+
+ records := make([]dns.PtrRecord, 0, len(rawRecords))
+ for _, raw := range rawRecords {
+ fqdn := raw.(string)
+ records = append(records, dns.PtrRecord{Ptrdname: &fqdn})
+ }
+
+ return records, nil
+}
diff --git a/azurerm/resource_arm_dns_ptr_record_test.go b/azurerm/resource_arm_dns_ptr_record_test.go
new file mode 100644
index 000000000000..1a20e3aaf63d
--- /dev/null
+++ b/azurerm/resource_arm_dns_ptr_record_test.go
@@ -0,0 +1,239 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/Azure/azure-sdk-for-go/arm/dns"
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// Acceptance test: basic PTR record with two targets.
+func TestAccAzureRMDnsPtrRecord_basic(t *testing.T) {
+ ri := acctest.RandInt()
+ config := testAccAzureRMDnsPtrRecord_basic(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMDnsPtrRecordDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMDnsPtrRecordExists("azurerm_dns_ptr_record.test"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: updating the record set in place grows it from two to
+// three targets.
+func TestAccAzureRMDnsPtrRecord_updateRecords(t *testing.T) {
+ ri := acctest.RandInt()
+ preConfig := testAccAzureRMDnsPtrRecord_basic(ri)
+ postConfig := testAccAzureRMDnsPtrRecord_updateRecords(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMDnsPtrRecordDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMDnsPtrRecordExists("azurerm_dns_ptr_record.test"),
+ resource.TestCheckResourceAttr("azurerm_dns_ptr_record.test", "records.#", "2"),
+ ),
+ },
+
+ {
+ Config: postConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMDnsPtrRecordExists("azurerm_dns_ptr_record.test"),
+ resource.TestCheckResourceAttr("azurerm_dns_ptr_record.test", "records.#", "3"),
+ ),
+ },
+ },
+ })
+}
+
+// Acceptance test: tags are applied and can be updated (two tags, then one).
+func TestAccAzureRMDnsPtrRecord_withTags(t *testing.T) {
+ ri := acctest.RandInt()
+ preConfig := testAccAzureRMDnsPtrRecord_withTags(ri)
+ postConfig := testAccAzureRMDnsPtrRecord_withTagsUpdate(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMDnsPtrRecordDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMDnsPtrRecordExists("azurerm_dns_ptr_record.test"),
+ resource.TestCheckResourceAttr("azurerm_dns_ptr_record.test", "tags.%", "2"),
+ ),
+ },
+
+ {
+ Config: postConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMDnsPtrRecordExists("azurerm_dns_ptr_record.test"),
+ resource.TestCheckResourceAttr(
+ "azurerm_dns_ptr_record.test", "tags.%", "1"),
+ ),
+ },
+ },
+ })
+}
+
+// testCheckAzureRMDnsPtrRecordExists returns a TestCheckFunc asserting that
+// the named PTR record in state exists in the Azure DNS zone.
+func testCheckAzureRMDnsPtrRecordExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ ptrName := rs.Primary.Attributes["name"]
+ zoneName := rs.Primary.Attributes["zone_name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for DNS PTR record: %s", ptrName)
+ }
+
+ conn := testAccProvider.Meta().(*ArmClient).dnsClient
+ resp, err := conn.Get(resourceGroup, zoneName, ptrName, dns.PTR)
+ if err != nil {
+ return fmt.Errorf("Bad: Get PTR RecordSet: %v", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("Bad: DNS PTR record %s (resource group: %s) does not exist", ptrName, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+// testCheckAzureRMDnsPtrRecordDestroy verifies that every PTR record in the
+// test state is gone from Azure after destroy.
+func testCheckAzureRMDnsPtrRecordDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*ArmClient).dnsClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_dns_ptr_record" {
+ continue
+ }
+
+ recordName := rs.Primary.Attributes["name"]
+ zone := rs.Primary.Attributes["zone_name"]
+ group := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := client.Get(group, zone, recordName, dns.PTR)
+ if err != nil {
+ // a Get failure is taken to mean the record no longer exists
+ return nil
+ }
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("DNS PTR record still exists:\n%#v", resp.RecordSetProperties)
+ }
+ }
+
+ return nil
+}
+
+// HCL config: resource group + zone + PTR record with two targets.
+func testAccAzureRMDnsPtrRecord_basic(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG_%[1]d"
+ location = "West US"
+}
+resource "azurerm_dns_zone" "test" {
+ name = "acctestzone%[1]d.com"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_dns_ptr_record" "test" {
+ name = "testptrrecord%[1]d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ zone_name = "${azurerm_dns_zone.test.name}"
+ ttl = "300"
+ records = ["hashicorp.com", "microsoft.com"]
+}
+`, rInt)
+}
+
+// HCL config: same as _basic plus a third record, for the in-place update
+// step.
+func testAccAzureRMDnsPtrRecord_updateRecords(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG_%[1]d"
+ location = "West US"
+}
+resource "azurerm_dns_zone" "test" {
+ name = "acctestzone%[1]d.com"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_dns_ptr_record" "test" {
+ name = "testptrrecord%[1]d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ zone_name = "${azurerm_dns_zone.test.name}"
+ ttl = "300"
+ records = ["hashicorp.com", "microsoft.com", "reddit.com"]
+}
+`, rInt)
+}
+
+// HCL config: PTR record carrying two tags.
+func testAccAzureRMDnsPtrRecord_withTags(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG_%[1]d"
+ location = "West US"
+}
+resource "azurerm_dns_zone" "test" {
+ name = "acctestzone%[1]d.com"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_dns_ptr_record" "test" {
+ name = "testptrrecord%[1]d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ zone_name = "${azurerm_dns_zone.test.name}"
+ ttl = "300"
+ records = ["hashicorp.com", "microsoft.com"]
+
+ tags {
+ environment = "Dev"
+ cost_center = "Ops"
+ }
+}
+`, rInt)
+}
+
+// HCL config: PTR record with tags reduced to one, for the tag-update step.
+func testAccAzureRMDnsPtrRecord_withTagsUpdate(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG_%[1]d"
+ location = "West US"
+}
+resource "azurerm_dns_zone" "test" {
+ name = "acctestzone%[1]d.com"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_dns_ptr_record" "test" {
+ name = "testptrrecord%[1]d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ zone_name = "${azurerm_dns_zone.test.name}"
+ ttl = "300"
+ records = ["hashicorp.com", "microsoft.com"]
+
+ tags {
+ environment = "Stage"
+ }
+}
+`, rInt)
+}
diff --git a/azurerm/resource_arm_express_route_circuit.go b/azurerm/resource_arm_express_route_circuit.go
index 2c03cbf0398b..e003231a2620 100644
--- a/azurerm/resource_arm_express_route_circuit.go
+++ b/azurerm/resource_arm_express_route_circuit.go
@@ -175,7 +175,7 @@ func resourceArmExpressRouteCircuitRead(d *schema.ResourceData, meta interface{}
d.Set("name", erc.Name)
d.Set("resource_group_name", resGroup)
- d.Set("location", erc.Location)
+ d.Set("location", azureRMNormalizeLocation(*erc.Location))
if erc.ServiceProviderProperties != nil {
d.Set("service_provider_name", erc.ServiceProviderProperties.ServiceProviderName)
diff --git a/azurerm/resource_arm_key_vault.go b/azurerm/resource_arm_key_vault.go
index 2038f9b03e24..2f893cf650c9 100644
--- a/azurerm/resource_arm_key_vault.go
+++ b/azurerm/resource_arm_key_vault.go
@@ -73,6 +73,7 @@ func resourceArmKeyVault() *schema.Resource {
Type: schema.TypeList,
Optional: true,
MinItems: 1,
+ MaxItems: 16,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"tenant_id": {
@@ -313,7 +314,7 @@ func flattenKeyVaultAccessPolicies(policies *[]keyvault.AccessPolicyEntry) []int
}
policyRaw["tenant_id"] = policy.TenantID.String()
- policyRaw["object_id"] = policy.ObjectID
+ policyRaw["object_id"] = *policy.ObjectID
policyRaw["key_permissions"] = keyPermissionsRaw
policyRaw["secret_permissions"] = secretPermissionsRaw
diff --git a/azurerm/resource_arm_loadbalancer.go b/azurerm/resource_arm_loadbalancer.go
index 6b9e4aacc2d0..7b44c728c998 100644
--- a/azurerm/resource_arm_loadbalancer.go
+++ b/azurerm/resource_arm_loadbalancer.go
@@ -174,7 +174,7 @@ func resourecArmLoadBalancerRead(d *schema.ResourceData, meta interface{}) error
}
d.Set("name", loadBalancer.Name)
- d.Set("location", loadBalancer.Location)
+ d.Set("location", azureRMNormalizeLocation(*loadBalancer.Location))
d.Set("resource_group_name", id.ResourceGroup)
if loadBalancer.LoadBalancerPropertiesFormat != nil && loadBalancer.LoadBalancerPropertiesFormat.FrontendIPConfigurations != nil {
diff --git a/azurerm/resource_arm_local_network_gateway.go b/azurerm/resource_arm_local_network_gateway.go
index 1a6005397119..a0c79d434a5f 100644
--- a/azurerm/resource_arm_local_network_gateway.go
+++ b/azurerm/resource_arm_local_network_gateway.go
@@ -118,7 +118,7 @@ func resourceArmLocalNetworkGatewayRead(d *schema.ResourceData, meta interface{}
d.Set("resource_group_name", resGroup)
d.Set("name", resp.Name)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
d.Set("gateway_address", resp.LocalNetworkGatewayPropertiesFormat.GatewayIPAddress)
prefs := []string{}
diff --git a/azurerm/resource_arm_managed_disk.go b/azurerm/resource_arm_managed_disk.go
index 2ca41abf470f..915898496001 100644
--- a/azurerm/resource_arm_managed_disk.go
+++ b/azurerm/resource_arm_managed_disk.go
@@ -191,7 +191,7 @@ func resourceArmManagedDiskRead(d *schema.ResourceData, meta interface{}) error
d.Set("name", resp.Name)
d.Set("resource_group_name", resGroup)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
if resp.Properties != nil {
flattenAzureRmManagedDiskProperties(d, resp.Properties)
diff --git a/azurerm/resource_arm_network_interface_card.go b/azurerm/resource_arm_network_interface_card.go
index 9ad2825dae93..64543eebb10f 100644
--- a/azurerm/resource_arm_network_interface_card.go
+++ b/azurerm/resource_arm_network_interface_card.go
@@ -18,6 +18,9 @@ func resourceArmNetworkInterface() *schema.Resource {
Read: resourceArmNetworkInterfaceRead,
Update: resourceArmNetworkInterfaceCreate,
Delete: resourceArmNetworkInterfaceDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
Schema: map[string]*schema.Schema{
"name": {
@@ -178,8 +181,8 @@ func resourceArmNetworkInterfaceCreate(d *schema.ResourceData, meta interface{})
return err
}
- armMutexKV.Lock(networkSecurityGroupName)
- defer armMutexKV.Unlock(networkSecurityGroupName)
+ azureRMLockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
}
dns, hasDns := d.GetOk("dns_servers")
@@ -205,13 +208,16 @@ func resourceArmNetworkInterfaceCreate(d *schema.ResourceData, meta interface{})
properties.DNSSettings = &ifaceDnsSettings
}
- ipConfigs, namesToLock, sgErr := expandAzureRmNetworkInterfaceIpConfigurations(d)
+ ipConfigs, subnetnToLock, vnnToLock, sgErr := expandAzureRmNetworkInterfaceIpConfigurations(d)
if sgErr != nil {
return fmt.Errorf("Error Building list of Network Interface IP Configurations: %s", sgErr)
}
- azureRMLockMultiple(namesToLock)
- defer azureRMUnlockMultiple(namesToLock)
+ azureRMLockMultipleByName(subnetnToLock, subnetResourceName)
+ defer azureRMUnlockMultipleByName(subnetnToLock, subnetResourceName)
+
+ azureRMLockMultipleByName(vnnToLock, virtualNetworkResourceName)
+ defer azureRMUnlockMultipleByName(vnnToLock, virtualNetworkResourceName)
if len(ipConfigs) > 0 {
properties.IPConfigurations = &ipConfigs
@@ -282,29 +288,49 @@ func resourceArmNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) e
}
}
+ if iface.IPConfigurations != nil {
+ d.Set("ip_configuration", schema.NewSet(resourceArmNetworkInterfaceIpConfigurationHash, flattenNetworkInterfaceIPConfigurations(iface.IPConfigurations)))
+ }
+
if iface.VirtualMachine != nil {
if *iface.VirtualMachine.ID != "" {
d.Set("virtual_machine_id", *iface.VirtualMachine.ID)
}
}
+ var appliedDNSServers []string
+ var dnsServers []string
if iface.DNSSettings != nil {
if iface.DNSSettings.AppliedDNSServers != nil && len(*iface.DNSSettings.AppliedDNSServers) > 0 {
- dnsServers := make([]string, 0, len(*iface.DNSSettings.AppliedDNSServers))
- for _, dns := range *iface.DNSSettings.AppliedDNSServers {
- dnsServers = append(dnsServers, dns)
+ for _, applied := range *iface.DNSSettings.AppliedDNSServers {
+ appliedDNSServers = append(appliedDNSServers, applied)
}
+ }
- if err := d.Set("applied_dns_servers", dnsServers); err != nil {
- return err
+ if iface.DNSSettings.DNSServers != nil && len(*iface.DNSSettings.DNSServers) > 0 {
+ for _, dns := range *iface.DNSSettings.DNSServers {
+ dnsServers = append(dnsServers, dns)
}
}
if iface.DNSSettings.InternalFqdn != nil && *iface.DNSSettings.InternalFqdn != "" {
d.Set("internal_fqdn", iface.DNSSettings.InternalFqdn)
}
+
+ d.Set("internal_dns_name_label", iface.DNSSettings.InternalDNSNameLabel)
+ }
+
+ if iface.NetworkSecurityGroup != nil {
+ d.Set("network_security_group_id", resp.NetworkSecurityGroup.ID)
}
+ d.Set("name", resp.Name)
+ d.Set("resource_group_name", resGroup)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
+ d.Set("applied_dns_servers", appliedDNSServers)
+ d.Set("dns_servers", dnsServers)
+ d.Set("enable_ip_forwarding", resp.EnableIPForwarding)
+
flattenAndSetTags(d, resp.Tags)
return nil
@@ -327,12 +353,13 @@ func resourceArmNetworkInterfaceDelete(d *schema.ResourceData, meta interface{})
return err
}
- armMutexKV.Lock(networkSecurityGroupName)
- defer armMutexKV.Unlock(networkSecurityGroupName)
+ azureRMLockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
}
configs := d.Get("ip_configuration").(*schema.Set).List()
- namesToLock := make([]string, 0)
+ subnetNamesToLock := make([]string, 0)
+ virtualNetworkNamesToLock := make([]string, 0)
for _, configRaw := range configs {
data := configRaw.(map[string]interface{})
@@ -343,13 +370,17 @@ func resourceArmNetworkInterfaceDelete(d *schema.ResourceData, meta interface{})
return err
}
subnetName := subnetId.Path["subnets"]
+ subnetNamesToLock = append(subnetNamesToLock, subnetName)
+
virtualNetworkName := subnetId.Path["virtualNetworks"]
- namesToLock = append(namesToLock, subnetName)
- namesToLock = append(namesToLock, virtualNetworkName)
+ virtualNetworkNamesToLock = append(virtualNetworkNamesToLock, virtualNetworkName)
}
- azureRMLockMultiple(&namesToLock)
- defer azureRMUnlockMultiple(&namesToLock)
+ azureRMLockMultipleByName(&subnetNamesToLock, subnetResourceName)
+ defer azureRMUnlockMultipleByName(&subnetNamesToLock, subnetResourceName)
+
+ azureRMLockMultipleByName(&virtualNetworkNamesToLock, virtualNetworkResourceName)
+ defer azureRMUnlockMultipleByName(&virtualNetworkNamesToLock, virtualNetworkResourceName)
_, error := ifaceClient.Delete(resGroup, name, make(chan struct{}))
err = <-error
@@ -398,10 +429,48 @@ func validateNetworkInterfacePrivateIpAddressAllocation(v interface{}, k string)
return
}
-func expandAzureRmNetworkInterfaceIpConfigurations(d *schema.ResourceData) ([]network.InterfaceIPConfiguration, *[]string, error) {
+// flattenNetworkInterfaceIPConfigurations converts the NIC's IP
+// configurations from the SDK shape into ip_configuration blocks for state.
+// NOTE(review): assumes ipConfigs is non-nil (the caller guards with
+// `iface.IPConfigurations != nil`) and that Name/Subnet.ID are always
+// populated by the API — confirm before reusing elsewhere.
+func flattenNetworkInterfaceIPConfigurations(ipConfigs *[]network.InterfaceIPConfiguration) []interface{} {
+ result := make([]interface{}, 0, len(*ipConfigs))
+ for _, ipConfig := range *ipConfigs {
+ niIPConfig := make(map[string]interface{})
+ niIPConfig["name"] = *ipConfig.Name
+ niIPConfig["subnet_id"] = *ipConfig.InterfaceIPConfigurationPropertiesFormat.Subnet.ID
+ niIPConfig["private_ip_address_allocation"] = strings.ToLower(string(ipConfig.InterfaceIPConfigurationPropertiesFormat.PrivateIPAllocationMethod))
+
+ // a concrete address is only stored for static allocations
+ if ipConfig.InterfaceIPConfigurationPropertiesFormat.PrivateIPAllocationMethod == network.Static {
+ niIPConfig["private_ip_address"] = *ipConfig.InterfaceIPConfigurationPropertiesFormat.PrivateIPAddress
+ }
+
+ if ipConfig.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress != nil {
+ niIPConfig["public_ip_address_id"] = *ipConfig.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.ID
+ }
+
+ var pools []interface{}
+ if ipConfig.InterfaceIPConfigurationPropertiesFormat.LoadBalancerBackendAddressPools != nil {
+ for _, pool := range *ipConfig.InterfaceIPConfigurationPropertiesFormat.LoadBalancerBackendAddressPools {
+ pools = append(pools, *pool.ID)
+ }
+ }
+ niIPConfig["load_balancer_backend_address_pools_ids"] = schema.NewSet(schema.HashString, pools)
+
+ var rules []interface{}
+ if ipConfig.InterfaceIPConfigurationPropertiesFormat.LoadBalancerInboundNatRules != nil {
+ for _, rule := range *ipConfig.InterfaceIPConfigurationPropertiesFormat.LoadBalancerInboundNatRules {
+ rules = append(rules, *rule.ID)
+ }
+ }
+ niIPConfig["load_balancer_inbound_nat_rules_ids"] = schema.NewSet(schema.HashString, rules)
+
+ result = append(result, niIPConfig)
+ }
+ return result
+}
+
+func expandAzureRmNetworkInterfaceIpConfigurations(d *schema.ResourceData) ([]network.InterfaceIPConfiguration, *[]string, *[]string, error) {
configs := d.Get("ip_configuration").(*schema.Set).List()
ipConfigs := make([]network.InterfaceIPConfiguration, 0, len(configs))
- namesToLock := make([]string, 0)
+ subnetNamesToLock := make([]string, 0)
+ virtualNetworkNamesToLock := make([]string, 0)
for _, configRaw := range configs {
data := configRaw.(map[string]interface{})
@@ -416,7 +485,7 @@ func expandAzureRmNetworkInterfaceIpConfigurations(d *schema.ResourceData) ([]ne
case "static":
allocationMethod = network.Static
default:
- return []network.InterfaceIPConfiguration{}, nil, fmt.Errorf(
+ return []network.InterfaceIPConfiguration{}, nil, nil, fmt.Errorf(
"valid values for private_ip_allocation_method are 'dynamic' and 'static' - got '%s'",
private_ip_allocation_method)
}
@@ -430,12 +499,12 @@ func expandAzureRmNetworkInterfaceIpConfigurations(d *schema.ResourceData) ([]ne
subnetId, err := parseAzureResourceID(subnet_id)
if err != nil {
- return []network.InterfaceIPConfiguration{}, nil, err
+ return []network.InterfaceIPConfiguration{}, nil, nil, err
}
subnetName := subnetId.Path["subnets"]
virtualNetworkName := subnetId.Path["virtualNetworks"]
- namesToLock = append(namesToLock, subnetName)
- namesToLock = append(namesToLock, virtualNetworkName)
+ subnetNamesToLock = append(subnetNamesToLock, subnetName)
+ virtualNetworkNamesToLock = append(virtualNetworkNamesToLock, virtualNetworkName)
if v := data["private_ip_address"].(string); v != "" {
properties.PrivateIPAddress = &v
@@ -486,5 +555,5 @@ func expandAzureRmNetworkInterfaceIpConfigurations(d *schema.ResourceData) ([]ne
ipConfigs = append(ipConfigs, ipConfig)
}
- return ipConfigs, &namesToLock, nil
+ return ipConfigs, &subnetNamesToLock, &virtualNetworkNamesToLock, nil
}
diff --git a/azurerm/resource_arm_network_interface_card_test.go b/azurerm/resource_arm_network_interface_card_test.go
index c885cc0b0211..2043d481845a 100644
--- a/azurerm/resource_arm_network_interface_card_test.go
+++ b/azurerm/resource_arm_network_interface_card_test.go
@@ -116,6 +116,24 @@ func TestAccAzureRMNetworkInterface_withTags(t *testing.T) {
})
}
+func TestAccAzureRMNetworkInterface_bug7986(t *testing.T) {
+ rInt := acctest.RandInt()
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMNetworkInterfaceDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccAzureRMNetworkInterface_bug7986(rInt),
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test1"),
+ testCheckAzureRMNetworkInterfaceExists("azurerm_network_interface.test2"),
+ ),
+ },
+ },
+ })
+}
+
func testCheckAzureRMNetworkInterfaceExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
@@ -475,3 +493,150 @@ resource "azurerm_network_interface" "test2" {
}
`, rInt)
}
+
+func testAccAzureRMNetworkInterface_bug7986(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctest-%d"
+ location = "West Europe"
+}
+
+resource "azurerm_network_security_group" "test" {
+ name = "acctest-%d-nsg"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ tags {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_network_security_rule" "test1" {
+ name = "test1"
+ priority = 101
+ direction = "Outbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ network_security_group_name = "${azurerm_network_security_group.test.name}"
+}
+
+resource "azurerm_network_security_rule" "test2" {
+ name = "test2"
+ priority = 102
+ direction = "Outbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "*"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ network_security_group_name = "${azurerm_network_security_group.test.name}"
+}
+
+resource "azurerm_public_ip" "test" {
+ name = "acctest-%d-pip"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ public_ip_address_allocation = "Dynamic"
+
+ tags {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acctest-%d-vn"
+ address_space = ["10.0.0.0/16"]
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+}
+
+resource "azurerm_subnet" "test" {
+ name = "first"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.2.0/24"
+}
+
+resource "azurerm_network_interface" "test1" {
+ name = "acctest-%d-nic1"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ ip_configuration {
+ name = "testconfiguration1"
+ subnet_id = "${azurerm_subnet.test.id}"
+ private_ip_address_allocation = "dynamic"
+ }
+
+ tags {
+ environment = "staging"
+ }
+}
+
+resource "azurerm_network_interface" "test2" {
+ name = "acctest-%d-nic2"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ ip_configuration {
+ name = "testconfiguration1"
+ subnet_id = "${azurerm_subnet.test.id}"
+ private_ip_address_allocation = "dynamic"
+ }
+
+ tags {
+ environment = "staging"
+ }
+}
+`, rInt, rInt, rInt, rInt, rInt, rInt)
+}
+
+func testAccAzureRMNetworkInterface_publicIP(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctest-rg-%d"
+ location = "West US"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acceptanceTestVirtualNetwork1"
+ address_space = ["10.0.0.0/16"]
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_subnet" "test" {
+ name = "testsubnet"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.2.0/24"
+}
+
+resource "azurerm_public_ip" "testext" {
+ name = "testpublicipext"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ public_ip_address_allocation = "static"
+}
+
+resource "azurerm_network_interface" "test" {
+ name = "acceptanceTestNetworkInterface1"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ ip_configuration {
+ name = "testconfiguration1"
+ subnet_id = "${azurerm_subnet.test.id}"
+ private_ip_address_allocation = "dynamic"
+ public_ip_address_id = "${azurerm_public_ip.testext.id}"
+ }
+}
+`, rInt)
+}
diff --git a/azurerm/resource_arm_network_security_group.go b/azurerm/resource_arm_network_security_group.go
index e8bc156f039b..f3e11b8c0d61 100644
--- a/azurerm/resource_arm_network_security_group.go
+++ b/azurerm/resource_arm_network_security_group.go
@@ -13,6 +13,8 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+var networkSecurityGroupResourceName = "azurerm_network_security_group"
+
func resourceArmNetworkSecurityGroup() *schema.Resource {
return &schema.Resource{
Create: resourceArmNetworkSecurityGroupCreate,
@@ -137,6 +139,9 @@ func resourceArmNetworkSecurityGroupCreate(d *schema.ResourceData, meta interfac
return fmt.Errorf("Error Building list of Network Security Group Rules: %s", sgErr)
}
+ azureRMLockByName(name, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(name, networkSecurityGroupResourceName)
+
sg := network.SecurityGroup{
Name: &name,
Location: &location,
@@ -202,7 +207,7 @@ func resourceArmNetworkSecurityGroupRead(d *schema.ResourceData, meta interface{
d.Set("resource_group_name", resGroup)
d.Set("name", resp.Name)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
flattenAndSetTags(d, resp.Tags)
return nil
diff --git a/azurerm/resource_arm_network_security_rule.go b/azurerm/resource_arm_network_security_rule.go
index 470fb8c3e125..9ac09c296e02 100644
--- a/azurerm/resource_arm_network_security_rule.go
+++ b/azurerm/resource_arm_network_security_rule.go
@@ -34,6 +34,7 @@ func resourceArmNetworkSecurityRule() *schema.Resource {
"network_security_group_name": {
Type: schema.TypeString,
Required: true,
+ ForceNew: true,
},
"description": {
@@ -120,8 +121,8 @@ func resourceArmNetworkSecurityRuleCreate(d *schema.ResourceData, meta interface
direction := d.Get("direction").(string)
protocol := d.Get("protocol").(string)
- armMutexKV.Lock(nsgName)
- defer armMutexKV.Unlock(nsgName)
+ azureRMLockByName(nsgName, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(nsgName, networkSecurityGroupResourceName)
properties := network.SecurityRulePropertiesFormat{
SourcePortRange: &source_port_range,
@@ -155,8 +156,7 @@ func resourceArmNetworkSecurityRuleCreate(d *schema.ResourceData, meta interface
return err
}
if read.ID == nil {
- return fmt.Errorf("Cannot read Security Group Rule %s/%s (resource group %s) ID",
- nsgName, name, resGroup)
+ return fmt.Errorf("Cannot read Security Group Rule %s/%s (resource group %s) ID", nsgName, name, resGroup)
}
d.SetId(*read.ID)
@@ -211,8 +211,8 @@ func resourceArmNetworkSecurityRuleDelete(d *schema.ResourceData, meta interface
nsgName := id.Path["networkSecurityGroups"]
sgRuleName := id.Path["securityRules"]
- armMutexKV.Lock(nsgName)
- defer armMutexKV.Unlock(nsgName)
+ azureRMLockByName(nsgName, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(nsgName, networkSecurityGroupResourceName)
_, error := secRuleClient.Delete(resGroup, nsgName, sgRuleName, make(chan struct{}))
err = <-error
diff --git a/azurerm/resource_arm_public_ip.go b/azurerm/resource_arm_public_ip.go
index 934839d8ccc6..da34e72734d7 100644
--- a/azurerm/resource_arm_public_ip.go
+++ b/azurerm/resource_arm_public_ip.go
@@ -169,7 +169,7 @@ func resourceArmPublicIpRead(d *schema.ResourceData, meta interface{}) error {
}
d.Set("resource_group_name", resGroup)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
d.Set("name", resp.Name)
d.Set("public_ip_address_allocation", strings.ToLower(string(resp.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod)))
diff --git a/azurerm/resource_arm_redis_cache.go b/azurerm/resource_arm_redis_cache.go
index 93505ec91ffd..e643e33eaf26 100644
--- a/azurerm/resource_arm_redis_cache.go
+++ b/azurerm/resource_arm_redis_cache.go
@@ -3,14 +3,15 @@ package azurerm
import (
"fmt"
"log"
-
"net/http"
+ "strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/arm/redis"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/helper/validation"
"github.com/jen20/riviera/azure"
)
@@ -54,9 +55,13 @@ func resourceArmRedisCache() *schema.Resource {
},
"sku_name": {
- Type: schema.TypeString,
- Required: true,
- ValidateFunc: validateRedisSku,
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(redis.Basic),
+ string(redis.Standard),
+ string(redis.Premium),
+ }, true),
DiffSuppressFunc: ignoreCaseDiffSuppressFunc,
},
@@ -78,19 +83,19 @@ func resourceArmRedisCache() *schema.Resource {
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"maxclients": {
- Type: schema.TypeString,
+ Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxmemory_delta": {
- Type: schema.TypeString,
+ Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"maxmemory_reserved": {
- Type: schema.TypeString,
+ Type: schema.TypeInt,
Optional: true,
Computed: true,
},
@@ -101,6 +106,23 @@ func resourceArmRedisCache() *schema.Resource {
Default: "volatile-lru",
ValidateFunc: validateRedisMaxMemoryPolicy,
},
+ "rdb_backup_enabled": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "rdb_backup_frequency": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ValidateFunc: validateRedisBackupFrequency,
+ },
+ "rdb_backup_max_snapshot_count": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "rdb_storage_connection_string": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
},
},
},
@@ -364,39 +386,45 @@ func redisStateRefreshFunc(client redis.GroupClient, resourceGroupName string, s
}
func expandRedisConfiguration(d *schema.ResourceData) *map[string]*string {
- configuration := d.Get("redis_configuration").([]interface{})
-
output := make(map[string]*string)
- if configuration == nil {
- return &output
+ if v, ok := d.GetOk("redis_configuration.0.maxclients"); ok {
+ clients := strconv.Itoa(v.(int))
+ output["maxclients"] = azure.String(clients)
}
- // TODO: can we use this to remove the below? \/
- //config := configuration[0].(map[string]interface{})
+ if v, ok := d.GetOk("redis_configuration.0.maxmemory_delta"); ok {
+ delta := strconv.Itoa(v.(int))
+ output["maxmemory-delta"] = azure.String(delta)
+ }
- for _, v := range configuration {
- config := v.(map[string]interface{})
+ if v, ok := d.GetOk("redis_configuration.0.maxmemory_reserved"); ok {
+ reserved := strconv.Itoa(v.(int))
+ output["maxmemory-reserved"] = azure.String(reserved)
+ }
- maxClients := config["maxclients"].(string)
- if maxClients != "" {
- output["maxclients"] = azure.String(maxClients)
- }
+ if v, ok := d.GetOk("redis_configuration.0.maxmemory_policy"); ok {
+ output["maxmemory-policy"] = azure.String(v.(string))
+ }
- maxMemoryDelta := config["maxmemory_delta"].(string)
- if maxMemoryDelta != "" {
- output["maxmemory-delta"] = azure.String(maxMemoryDelta)
- }
+ // Backup
+ if v, ok := d.GetOk("redis_configuration.0.rdb_backup_enabled"); ok {
+ enabled := strconv.FormatBool(v.(bool))
+ output["rdb-backup-enabled"] = azure.String(enabled)
+ }
- maxMemoryReserved := config["maxmemory_reserved"].(string)
- if maxMemoryReserved != "" {
- output["maxmemory-reserved"] = azure.String(maxMemoryReserved)
- }
+ if v, ok := d.GetOk("redis_configuration.0.rdb_backup_frequency"); ok {
+ frequency := strconv.Itoa(v.(int))
+ output["rdb-backup-frequency"] = azure.String(frequency)
+ }
- maxMemoryPolicy := config["maxmemory_policy"].(string)
- if maxMemoryPolicy != "" {
- output["maxmemory-policy"] = azure.String(maxMemoryPolicy)
- }
+ if v, ok := d.GetOk("redis_configuration.0.rdb_backup_max_snapshot_count"); ok {
+ snapshots := strconv.Itoa(v.(int))
+ output["rdb-backup-max-snapshot-count"] = azure.String(snapshots)
+ }
+
+ if v, ok := d.GetOk("redis_configuration.0.rdb_storage_connection_string"); ok {
+ output["rdb-storage-connection-string"] = azure.String(v.(string))
}
return &output
@@ -411,6 +439,11 @@ func flattenRedisConfiguration(configuration *map[string]*string) map[string]*st
redisConfiguration["maxmemory_reserved"] = config["maxmemory-reserved"]
redisConfiguration["maxmemory_policy"] = config["maxmemory-policy"]
+ redisConfiguration["rdb_backup_enabled"] = config["rdb-backup-enabled"]
+ redisConfiguration["rdb_backup_frequency"] = config["rdb-backup-frequency"]
+ redisConfiguration["rdb_backup_max_snapshot_count"] = config["rdb-backup-max-snapshot-count"]
+ redisConfiguration["rdb_storage_connection_string"] = config["rdb-storage-connection-string"]
+
return redisConfiguration
}
@@ -445,16 +478,20 @@ func validateRedisMaxMemoryPolicy(v interface{}, k string) (ws []string, errors
return
}
-func validateRedisSku(v interface{}, k string) (ws []string, errors []error) {
- value := strings.ToLower(v.(string))
- skus := map[string]bool{
- "basic": true,
- "standard": true,
- "premium": true,
+func validateRedisBackupFrequency(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(int)
+ frequencies := map[int]bool{
+ 15: true,
+ 30: true,
+ 60: true,
+ 360: true,
+ 720: true,
+ 1440: true,
 }
- if !skus[value] {
- errors = append(errors, fmt.Errorf("Redis SKU can only be Basic, Standard or Premium"))
+ if !frequencies[value] {
+ errors = append(errors, fmt.Errorf("Redis Backup Frequency can only be '15', '30', '60', '360', '720' or '1440'"))
}
+
return
}
diff --git a/azurerm/resource_arm_redis_cache_test.go b/azurerm/resource_arm_redis_cache_test.go
index dacafe135686..a69291da321f 100644
--- a/azurerm/resource_arm_redis_cache_test.go
+++ b/azurerm/resource_arm_redis_cache_test.go
@@ -77,41 +77,35 @@ func TestAccAzureRMRedisCacheMaxMemoryPolicy_validation(t *testing.T) {
}
}
-func TestAccAzureRMRedisCacheSku_validation(t *testing.T) {
+func TestAccAzureRMRedisCacheBackupFrequency_validation(t *testing.T) {
cases := []struct {
- Value string
+ Value int
ErrCount int
}{
- {
- Value: "Basic",
- ErrCount: 0,
- },
- {
- Value: "Standard",
- ErrCount: 0,
- },
- {
- Value: "Premium",
- ErrCount: 0,
- },
- {
- Value: "Random",
- ErrCount: 1,
- },
+ {Value: 1, ErrCount: 1},
+ {Value: 15, ErrCount: 0},
+ {Value: 30, ErrCount: 0},
+ {Value: 45, ErrCount: 1},
+ {Value: 60, ErrCount: 0},
+ {Value: 120, ErrCount: 1},
+ {Value: 240, ErrCount: 1},
+ {Value: 360, ErrCount: 0},
+ {Value: 720, ErrCount: 0},
+ {Value: 1440, ErrCount: 0},
}
for _, tc := range cases {
- _, errors := validateRedisSku(tc.Value, "azurerm_redis_cache")
+ _, errors := validateRedisBackupFrequency(tc.Value, "azurerm_redis_cache")
if len(errors) != tc.ErrCount {
- t.Fatalf("Expected the Azure RM Redis Cache Sku to trigger a validation error")
+ t.Fatalf("Expected the AzureRM Redis Cache Backup Frequency to trigger a validation error for '%d'", tc.Value)
}
}
}
func TestAccAzureRMRedisCache_basic(t *testing.T) {
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMRedisCache_basic, ri, ri)
+ config := testAccAzureRMRedisCache_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -130,7 +124,7 @@ func TestAccAzureRMRedisCache_basic(t *testing.T) {
func TestAccAzureRMRedisCache_standard(t *testing.T) {
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMRedisCache_standard, ri, ri)
+ config := testAccAzureRMRedisCache_standard(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -149,7 +143,7 @@ func TestAccAzureRMRedisCache_standard(t *testing.T) {
func TestAccAzureRMRedisCache_premium(t *testing.T) {
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMRedisCache_premium, ri, ri)
+ config := testAccAzureRMRedisCache_premium(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -168,7 +162,7 @@ func TestAccAzureRMRedisCache_premium(t *testing.T) {
func TestAccAzureRMRedisCache_premiumSharded(t *testing.T) {
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMRedisCache_premiumSharded, ri, ri)
+ config := testAccAzureRMRedisCache_premiumSharded(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -194,14 +188,14 @@ func TestAccAzureRMRedisCache_NonStandardCasing(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMRedisCacheDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"),
),
},
- resource.TestStep{
+ {
Config: config,
PlanOnly: true,
ExpectNonEmptyPlan: false,
@@ -210,6 +204,72 @@ func TestAccAzureRMRedisCache_NonStandardCasing(t *testing.T) {
})
}
+func TestAccAzureRMRedisCache_BackupDisabled(t *testing.T) {
+ ri := acctest.RandInt()
+ config := testAccAzureRMRedisCacheBackupDisabled(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMRedisCacheDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMRedisCache_BackupEnabled(t *testing.T) {
+ ri := acctest.RandInt()
+ rs := acctest.RandString(4)
+ config := testAccAzureRMRedisCacheBackupEnabled(ri, rs)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMRedisCacheDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMRedisCache_BackupEnabledDisabled(t *testing.T) {
+ ri := acctest.RandInt()
+ rs := acctest.RandString(4)
+ config := testAccAzureRMRedisCacheBackupEnabled(ri, rs)
+ updatedConfig := testAccAzureRMRedisCacheBackupDisabled(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMRedisCacheDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"),
+ ),
+ },
+ {
+ Config: updatedConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMRedisCacheExists("azurerm_redis_cache.test"),
+ ),
+ },
+ },
+ })
+}
+
func testCheckAzureRMRedisCacheExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
@@ -264,7 +324,8 @@ func testCheckAzureRMRedisCacheDestroy(s *terraform.State) error {
return nil
}
-var testAccAzureRMRedisCache_basic = `
+func testAccAzureRMRedisCache_basic(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -283,9 +344,11 @@ resource "azurerm_redis_cache" "test" {
maxclients = "256"
}
}
-`
+`, rInt, rInt)
+}
-var testAccAzureRMRedisCache_standard = `
+func testAccAzureRMRedisCache_standard(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -307,9 +370,11 @@ resource "azurerm_redis_cache" "test" {
environment = "production"
}
}
-`
+`, rInt, rInt)
+}
-var testAccAzureRMRedisCache_premium = `
+func testAccAzureRMRedisCache_premium(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -324,15 +389,17 @@ resource "azurerm_redis_cache" "test" {
sku_name = "Premium"
enable_non_ssl_port = false
redis_configuration {
- maxclients = "256",
- maxmemory_reserved = "2",
- maxmemory_delta = "2"
+ maxclients = 256,
+ maxmemory_reserved = 2,
+ maxmemory_delta = 2
maxmemory_policy = "allkeys-lru"
}
}
-`
+`, rInt, rInt)
+}
-var testAccAzureRMRedisCache_premiumSharded = `
+func testAccAzureRMRedisCache_premiumSharded(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -349,12 +416,13 @@ resource "azurerm_redis_cache" "test" {
shard_count = 3
redis_configuration {
maxclients = "256",
- maxmemory_reserved = "2",
- maxmemory_delta = "2"
+ maxmemory_reserved = 2,
+ maxmemory_delta = 2
maxmemory_policy = "allkeys-lru"
}
}
-`
+`, rInt, rInt)
+}
func testAccAzureRMRedisCacheNonStandardCasing(ri int) string {
return fmt.Sprintf(`
@@ -376,3 +444,61 @@ resource "azurerm_redis_cache" "test" {
}
`, ri, ri)
}
+
+func testAccAzureRMRedisCacheBackupDisabled(ri int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_redis_cache" "test" {
+ name = "acctestRedis-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ capacity = 3
+ family = "P"
+ sku_name = "Premium"
+ enable_non_ssl_port = false
+ redis_configuration {
+ maxclients = 256
+ rdb_backup_enabled = false
+ }
+}
+`, ri, ri)
+}
+
+func testAccAzureRMRedisCacheBackupEnabled(ri int, rs string) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+resource "azurerm_storage_account" "test" {
+ name = "unlikely23exst2acct%s"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ location = "westus"
+ account_type = "Standard_GRS"
+
+ tags {
+ environment = "staging"
+ }
+}
+resource "azurerm_redis_cache" "test" {
+ name = "acctestRedis-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ capacity = 3
+ family = "P"
+ sku_name = "Premium"
+ enable_non_ssl_port = false
+ redis_configuration {
+ maxclients = 256
+ rdb_backup_enabled = true
+ rdb_backup_frequency = 60
+ rdb_backup_max_snapshot_count = 1
+ rdb_storage_connection_string = "DefaultEndpointsProtocol=https;BlobEndpoint=${azurerm_storage_account.test.primary_blob_endpoint};AccountName=${azurerm_storage_account.test.name};AccountKey=${azurerm_storage_account.test.primary_access_key}"
+ }
+}
+`, ri, rs, ri)
+}
diff --git a/azurerm/resource_arm_resource_group.go b/azurerm/resource_arm_resource_group.go
index 7d71f76077ec..679b5aa66ae5 100644
--- a/azurerm/resource_arm_resource_group.go
+++ b/azurerm/resource_arm_resource_group.go
@@ -139,7 +139,7 @@ func resourceArmResourceGroupRead(d *schema.ResourceData, meta interface{}) erro
resp := readResponse.Parsed.(*azure.GetResourceGroupResponse)
d.Set("name", resp.Name)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
flattenAndSetTags(d, resp.Tags)
return nil
diff --git a/azurerm/resource_arm_route.go b/azurerm/resource_arm_route.go
index 0f0899ac1091..9f01662bcda9 100644
--- a/azurerm/resource_arm_route.go
+++ b/azurerm/resource_arm_route.go
@@ -72,8 +72,8 @@ func resourceArmRouteCreate(d *schema.ResourceData, meta interface{}) error {
addressPrefix := d.Get("address_prefix").(string)
nextHopType := d.Get("next_hop_type").(string)
- armMutexKV.Lock(rtName)
- defer armMutexKV.Unlock(rtName)
+ azureRMLockByName(rtName, routeTableResourceName)
+ defer azureRMUnlockByName(rtName, routeTableResourceName)
properties := network.RoutePropertiesFormat{
AddressPrefix: &addressPrefix,
@@ -153,8 +153,8 @@ func resourceArmRouteDelete(d *schema.ResourceData, meta interface{}) error {
rtName := id.Path["routeTables"]
routeName := id.Path["routes"]
- armMutexKV.Lock(rtName)
- defer armMutexKV.Unlock(rtName)
+ azureRMLockByName(rtName, routeTableResourceName)
+ defer azureRMUnlockByName(rtName, routeTableResourceName)
_, error := routesClient.Delete(resGroup, rtName, routeName, make(chan struct{}))
err = <-error
diff --git a/azurerm/resource_arm_route_table.go b/azurerm/resource_arm_route_table.go
index bca0c29ddbc4..3daf6a7bcd33 100644
--- a/azurerm/resource_arm_route_table.go
+++ b/azurerm/resource_arm_route_table.go
@@ -12,6 +12,8 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+var routeTableResourceName = "azurerm_route_table"
+
func resourceArmRouteTable() *schema.Resource {
return &schema.Resource{
Create: resourceArmRouteTableCreate,
@@ -151,7 +153,7 @@ func resourceArmRouteTableRead(d *schema.ResourceData, meta interface{}) error {
d.Set("name", name)
d.Set("resource_group_name", resGroup)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
if resp.RouteTablePropertiesFormat.Routes != nil {
d.Set("route", schema.NewSet(resourceArmRouteTableRouteHash, flattenAzureRmRouteTableRoutes(resp.RouteTablePropertiesFormat.Routes)))
diff --git a/azurerm/resource_arm_servicebus_namespace.go b/azurerm/resource_arm_servicebus_namespace.go
index d9c93cf48019..fe11c5e99b45 100644
--- a/azurerm/resource_arm_servicebus_namespace.go
+++ b/azurerm/resource_arm_servicebus_namespace.go
@@ -8,6 +8,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/servicebus"
"github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/helper/validation"
)
// Default Authorization Rule/Policy created by Azure, used to populate the
@@ -40,10 +41,14 @@ func resourceArmServiceBusNamespace() *schema.Resource {
},
"sku": {
- Type: schema.TypeString,
- Required: true,
- ForceNew: true,
- ValidateFunc: validateServiceBusNamespaceSku,
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(servicebus.Basic),
+ string(servicebus.Standard),
+ string(servicebus.Premium),
+ }, true),
DiffSuppressFunc: ignoreCaseDiffSuppressFunc,
},
@@ -83,7 +88,7 @@ func resourceArmServiceBusNamespace() *schema.Resource {
func resourceArmServiceBusNamespaceCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
namespaceClient := client.serviceBusNamespacesClient
- log.Printf("[INFO] preparing arguments for Azure ARM ServiceBus Namespace creation.")
+ log.Printf("[INFO] preparing arguments for AzureRM ServiceBus Namespace creation.")
name := d.Get("name").(string)
location := d.Get("location").(string)
@@ -134,7 +139,7 @@ func resourceArmServiceBusNamespaceRead(d *schema.ResourceData, meta interface{}
resp, err := namespaceClient.Get(resGroup, name)
if err != nil {
- return fmt.Errorf("Error making Read request on Azure ServiceBus Namespace %s: %+v", name, err)
+ return fmt.Errorf("Error making Read request on Azure ServiceBus Namespace '%s': %+v", name, err)
}
if resp.StatusCode == http.StatusNotFound {
d.SetId("")
@@ -177,26 +182,12 @@ func resourceArmServiceBusNamespaceDelete(d *schema.ResourceData, meta interface
err = <-error
if resp.StatusCode != http.StatusNotFound {
- return fmt.Errorf("Error issuing Azure ARM delete request of ServiceBus Namespace'%s': %+v", name, err)
+ return fmt.Errorf("Error issuing Azure ARM delete request of ServiceBus Namespace '%s': %+v", name, err)
}
return nil
}
-func validateServiceBusNamespaceSku(v interface{}, k string) (ws []string, errors []error) {
- value := strings.ToLower(v.(string))
- skus := map[string]bool{
- "basic": true,
- "standard": true,
- "premium": true,
- }
-
- if !skus[value] {
- errors = append(errors, fmt.Errorf("ServiceBus Namespace SKU can only be Basic, Standard or Premium"))
- }
- return
-}
-
func validateServiceBusNamespaceCapacity(v interface{}, k string) (ws []string, errors []error) {
value := v.(int)
capacities := map[int]bool{
diff --git a/azurerm/resource_arm_servicebus_namespace_test.go b/azurerm/resource_arm_servicebus_namespace_test.go
index 0e0942e5aeac..6366fafd27b3 100644
--- a/azurerm/resource_arm_servicebus_namespace_test.go
+++ b/azurerm/resource_arm_servicebus_namespace_test.go
@@ -43,52 +43,20 @@ func TestAccAzureRMServiceBusNamespaceCapacity_validation(t *testing.T) {
}
}
-func TestAccAzureRMServiceBusNamespaceSku_validation(t *testing.T) {
- cases := []struct {
- Value string
- ErrCount int
- }{
- {
- Value: "Basic",
- ErrCount: 0,
- },
- {
- Value: "Standard",
- ErrCount: 0,
- },
- {
- Value: "Premium",
- ErrCount: 0,
- },
- {
- Value: "Random",
- ErrCount: 1,
- },
- }
-
- for _, tc := range cases {
- _, errors := validateServiceBusNamespaceSku(tc.Value, "azurerm_servicebus_namespace")
-
- if len(errors) != tc.ErrCount {
- t.Fatalf("Expected the Azure RM ServiceBus Namespace Sku to trigger a validation error")
- }
- }
-}
-
func TestAccAzureRMServiceBusNamespace_basic(t *testing.T) {
-
+ resourceName := "azurerm_servicebus_namespace.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri)
+ config := testAccAzureRMServiceBusNamespace_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"),
+ testCheckAzureRMServiceBusNamespaceExists(resourceName),
),
},
},
@@ -96,26 +64,27 @@ func TestAccAzureRMServiceBusNamespace_basic(t *testing.T) {
}
func TestAccAzureRMServiceBusNamespace_readDefaultKeys(t *testing.T) {
+ resourceName := "azurerm_servicebus_namespace.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri)
+ config := testAccAzureRMServiceBusNamespace_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"),
+ testCheckAzureRMServiceBusNamespaceExists(resourceName),
resource.TestMatchResourceAttr(
- "azurerm_servicebus_namespace.test", "default_primary_connection_string", regexp.MustCompile("Endpoint=.+")),
+ resourceName, "default_primary_connection_string", regexp.MustCompile("Endpoint=.+")),
resource.TestMatchResourceAttr(
- "azurerm_servicebus_namespace.test", "default_secondary_connection_string", regexp.MustCompile("Endpoint=.+")),
+ resourceName, "default_secondary_connection_string", regexp.MustCompile("Endpoint=.+")),
resource.TestMatchResourceAttr(
- "azurerm_servicebus_namespace.test", "default_primary_key", regexp.MustCompile(".+")),
+ resourceName, "default_primary_key", regexp.MustCompile(".+")),
resource.TestMatchResourceAttr(
- "azurerm_servicebus_namespace.test", "default_secondary_key", regexp.MustCompile(".+")),
+ resourceName, "default_secondary_key", regexp.MustCompile(".+")),
),
},
},
@@ -123,6 +92,7 @@ func TestAccAzureRMServiceBusNamespace_readDefaultKeys(t *testing.T) {
}
func TestAccAzureRMServiceBusNamespace_NonStandardCasing(t *testing.T) {
+ resourceName := "azurerm_servicebus_namespace.test"
ri := acctest.RandInt()
config := testAccAzureRMServiceBusNamespaceNonStandardCasing(ri)
@@ -132,13 +102,13 @@ func TestAccAzureRMServiceBusNamespace_NonStandardCasing(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"),
+ testCheckAzureRMServiceBusNamespaceExists(resourceName),
),
},
- resource.TestStep{
+ {
Config: config,
PlanOnly: true,
ExpectNonEmptyPlan: false,
@@ -201,18 +171,20 @@ func testCheckAzureRMServiceBusNamespaceExists(name string) resource.TestCheckFu
}
}
-var testAccAzureRMServiceBusNamespace_basic = `
+func testAccAzureRMServiceBusNamespace_basic(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
}
resource "azurerm_servicebus_namespace" "test" {
name = "acctestservicebusnamespace-%d"
- location = "West US"
+ location = "${azurerm_resource_group.test.location}"
resource_group_name = "${azurerm_resource_group.test.name}"
sku = "basic"
}
-`
+`, rInt, rInt)
+}
func testAccAzureRMServiceBusNamespaceNonStandardCasing(ri int) string {
return fmt.Sprintf(`
@@ -222,7 +194,7 @@ resource "azurerm_resource_group" "test" {
}
resource "azurerm_servicebus_namespace" "test" {
name = "acctestservicebusnamespace-%d"
- location = "West US"
+ location = "${azurerm_resource_group.test.location}"
resource_group_name = "${azurerm_resource_group.test.name}"
sku = "Basic"
}
diff --git a/azurerm/resource_arm_servicebus_queue.go b/azurerm/resource_arm_servicebus_queue.go
new file mode 100644
index 000000000000..3b8be54402c5
--- /dev/null
+++ b/azurerm/resource_arm_servicebus_queue.go
@@ -0,0 +1,255 @@
+package azurerm
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/arm/servicebus"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceArmServiceBusQueue() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceArmServiceBusQueueCreateUpdate,
+ Read: resourceArmServiceBusQueueRead,
+ Update: resourceArmServiceBusQueueCreateUpdate,
+ Delete: resourceArmServiceBusQueueDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "namespace_name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "location": locationSchema(),
+
+ "resource_group_name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+
+ "auto_delete_on_idle": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "default_message_ttl": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "duplicate_detection_history_time_window": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+
+ "enable_batched_operations": {
+ Type: schema.TypeBool,
+ Default: false,
+ Optional: true,
+ },
+
+ "enable_express": {
+ Type: schema.TypeBool,
+ Default: false,
+ Optional: true,
+ },
+
+ "enable_partitioning": {
+ Type: schema.TypeBool,
+ Default: false,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "max_size_in_megabytes": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+
+ "requires_duplicate_detection": {
+ Type: schema.TypeBool,
+ Default: false,
+ Optional: true,
+ ForceNew: true,
+ },
+
+ "support_ordering": {
+ Type: schema.TypeBool,
+ Default: false,
+ Optional: true,
+ },
+ },
+ }
+}
+
+func resourceArmServiceBusQueueCreateUpdate(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).serviceBusQueuesClient
+ log.Printf("[INFO] preparing arguments for AzureRM ServiceBus Queue creation/update.")
+
+ name := d.Get("name").(string)
+ namespaceName := d.Get("namespace_name").(string)
+ location := d.Get("location").(string)
+ resGroup := d.Get("resource_group_name").(string)
+
+ enableBatchedOps := d.Get("enable_batched_operations").(bool)
+ enableExpress := d.Get("enable_express").(bool)
+ enablePartitioning := d.Get("enable_partitioning").(bool)
+ maxSize := int64(d.Get("max_size_in_megabytes").(int))
+ requiresDuplicateDetection := d.Get("requires_duplicate_detection").(bool)
+ supportOrdering := d.Get("support_ordering").(bool)
+
+ parameters := servicebus.QueueCreateOrUpdateParameters{
+ Name: &name,
+ Location: &location,
+ QueueProperties: &servicebus.QueueProperties{
+ EnableBatchedOperations: &enableBatchedOps,
+ EnableExpress: &enableExpress,
+ EnablePartitioning: &enablePartitioning,
+ MaxSizeInMegabytes: &maxSize,
+ RequiresDuplicateDetection: &requiresDuplicateDetection,
+ SupportOrdering: &supportOrdering,
+ },
+ }
+
+ if autoDeleteOnIdle := d.Get("auto_delete_on_idle").(string); autoDeleteOnIdle != "" {
+ parameters.QueueProperties.AutoDeleteOnIdle = &autoDeleteOnIdle
+ }
+
+ if defaultTTL := d.Get("default_message_ttl").(string); defaultTTL != "" {
+ parameters.QueueProperties.DefaultMessageTimeToLive = &defaultTTL
+ }
+
+ if duplicateWindow := d.Get("duplicate_detection_history_time_window").(string); duplicateWindow != "" {
+ parameters.QueueProperties.DuplicateDetectionHistoryTimeWindow = &duplicateWindow
+ }
+
+ // We need to retrieve the namespace because Premium namespace works differently from Basic and Standard,
+ // so it needs different rules applied to it.
+ namespace, nsErr := meta.(*ArmClient).serviceBusNamespacesClient.Get(resGroup, namespaceName)
+ if nsErr != nil {
+ return nsErr
+ }
+
+ // Enforce Premium namespace to have partitioning enabled in Terraform. It is always enabled in Azure for
+ // Premium SKU.
+ if namespace.Sku.Name == servicebus.Premium && !d.Get("enable_partitioning").(bool) {
+ return fmt.Errorf("ServiceBus Queue (%s) must have Partitioning enabled for Premium SKU", name)
+ }
+
+ // Enforce Premium namespace to have Express Entities disabled in Terraform since they are not supported for
+ // Premium SKU.
+ if namespace.Sku.Name == servicebus.Premium && d.Get("enable_express").(bool) {
+ return fmt.Errorf("ServiceBus Queue (%s) does not support Express Entities in Premium SKU and must be disabled", name)
+ }
+
+ _, err := client.CreateOrUpdate(resGroup, namespaceName, name, parameters)
+ if err != nil {
+ return err
+ }
+
+ read, err := client.Get(resGroup, namespaceName, name)
+ if err != nil {
+ return err
+ }
+ if read.ID == nil {
+ return fmt.Errorf("Cannot read ServiceBus Queue %s (resource group %s) ID", name, resGroup)
+ }
+
+ d.SetId(*read.ID)
+
+ return resourceArmServiceBusQueueRead(d, meta)
+}
+
+func resourceArmServiceBusQueueRead(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).serviceBusQueuesClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ namespaceName := id.Path["namespaces"]
+ name := id.Path["queues"]
+
+ resp, err := client.Get(resGroup, namespaceName, name)
+ if err != nil {
+ return fmt.Errorf("Error making Read request on Azure ServiceBus Queue %s: %s", name, err)
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ d.SetId("")
+ return nil
+ }
+
+ d.Set("name", resp.Name)
+ d.Set("resource_group_name", resGroup)
+ d.Set("namespace_name", namespaceName)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
+
+ if resp.QueueProperties == nil {
+		return fmt.Errorf("Missing QueueProperties in response for Azure ServiceBus Queue %s", name)
+ }
+
+ props := resp.QueueProperties
+ d.Set("auto_delete_on_idle", props.AutoDeleteOnIdle)
+ d.Set("default_message_ttl", props.DefaultMessageTimeToLive)
+ d.Set("duplicate_detection_history_time_window", props.DuplicateDetectionHistoryTimeWindow)
+
+ d.Set("enable_batched_operations", props.EnableBatchedOperations)
+ d.Set("enable_express", props.EnableExpress)
+ d.Set("enable_partitioning", props.EnablePartitioning)
+ d.Set("requires_duplicate_detection", props.RequiresDuplicateDetection)
+ d.Set("support_ordering", props.SupportOrdering)
+
+ maxSize := int(*props.MaxSizeInMegabytes)
+
+ // If the queue is NOT in a premium namespace (ie. it is Basic or Standard) and partitioning is enabled
+ // then the max size returned by the API will be 16 times greater than the value set.
+ if *props.EnablePartitioning {
+ namespace, err := meta.(*ArmClient).serviceBusNamespacesClient.Get(resGroup, namespaceName)
+ if err != nil {
+ return err
+ }
+
+ if namespace.Sku.Name != servicebus.Premium {
+ const partitionCount = 16
+ maxSize = int(*props.MaxSizeInMegabytes / partitionCount)
+ }
+ }
+
+ d.Set("max_size_in_megabytes", maxSize)
+
+ return nil
+}
+
+func resourceArmServiceBusQueueDelete(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*ArmClient).serviceBusQueuesClient
+
+ id, err := parseAzureResourceID(d.Id())
+ if err != nil {
+ return err
+ }
+ resGroup := id.ResourceGroup
+ namespaceName := id.Path["namespaces"]
+ name := id.Path["queues"]
+
+ _, err = client.Delete(resGroup, namespaceName, name)
+
+ return err
+}
diff --git a/azurerm/resource_arm_servicebus_queue_test.go b/azurerm/resource_arm_servicebus_queue_test.go
new file mode 100644
index 000000000000..75733d079cac
--- /dev/null
+++ b/azurerm/resource_arm_servicebus_queue_test.go
@@ -0,0 +1,326 @@
+package azurerm
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAzureRMServiceBusQueue_basic(t *testing.T) {
+ resourceName := "azurerm_servicebus_queue.test"
+ ri := acctest.RandInt()
+ config := testAccAzureRMServiceBusQueue_basic(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusQueueDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusQueueExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "enable_batched_operations", "false"),
+ resource.TestCheckResourceAttr(resourceName, "enable_express", "false"),
+ resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "false"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusQueue_update(t *testing.T) {
+ resourceName := "azurerm_servicebus_queue.test"
+ ri := acctest.RandInt()
+ preConfig := testAccAzureRMServiceBusQueue_basic(ri)
+ postConfig := testAccAzureRMServiceBusQueue_update(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusQueueDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusQueueExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "enable_batched_operations", "false"),
+ resource.TestCheckResourceAttr(resourceName, "enable_express", "false"),
+ ),
+ },
+ {
+ Config: postConfig,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "enable_batched_operations", "true"),
+ resource.TestCheckResourceAttr(resourceName, "enable_express", "true"),
+ resource.TestCheckResourceAttr(resourceName, "max_size_in_megabytes", "2048"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusQueue_enablePartitioningStandard(t *testing.T) {
+ resourceName := "azurerm_servicebus_queue.test"
+ ri := acctest.RandInt()
+ preConfig := testAccAzureRMServiceBusQueue_basic(ri)
+ postConfig := testAccAzureRMServiceBusQueue_enablePartitioningStandard(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusQueueDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusQueueExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "false"),
+ ),
+ },
+ {
+ Config: postConfig,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "true"),
+					// Ensure size is read back in its original value and not the x16 value returned by Azure
+ resource.TestCheckResourceAttr(resourceName, "max_size_in_megabytes", "5120"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusQueue_defaultEnablePartitioningPremium(t *testing.T) {
+ resourceName := "azurerm_servicebus_queue.test"
+ ri := acctest.RandInt()
+ config := testAccAzureRMServiceBusQueue_Premium(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusQueueDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusQueueExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "true"),
+ resource.TestCheckResourceAttr(resourceName, "enable_express", "false"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusQueue_enableDuplicateDetection(t *testing.T) {
+ resourceName := "azurerm_servicebus_queue.test"
+ ri := acctest.RandInt()
+ preConfig := testAccAzureRMServiceBusQueue_basic(ri)
+ postConfig := testAccAzureRMServiceBusQueue_enableDuplicateDetection(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusQueueDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusQueueExists(resourceName),
+ resource.TestCheckResourceAttr(resourceName, "requires_duplicate_detection", "false"),
+ ),
+ },
+ {
+ Config: postConfig,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(resourceName, "requires_duplicate_detection", "true"),
+ ),
+ },
+ },
+ })
+}
+
+func testCheckAzureRMServiceBusQueueDestroy(s *terraform.State) error {
+ client := testAccProvider.Meta().(*ArmClient).serviceBusQueuesClient
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "azurerm_servicebus_queue" {
+ continue
+ }
+
+ name := rs.Primary.Attributes["name"]
+ namespaceName := rs.Primary.Attributes["namespace_name"]
+ resourceGroup := rs.Primary.Attributes["resource_group_name"]
+
+ resp, err := client.Get(resourceGroup, namespaceName, name)
+ if err != nil {
+ if resp.StatusCode == http.StatusNotFound {
+ return nil
+ }
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNotFound {
+ return fmt.Errorf("ServiceBus Queue still exists:\n%#v", resp.QueueProperties)
+ }
+ }
+
+ return nil
+}
+
+func testCheckAzureRMServiceBusQueueExists(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ // Ensure we have enough information in state to look up in API
+ rs, ok := s.RootModule().Resources[name]
+ if !ok {
+ return fmt.Errorf("Not found: %s", name)
+ }
+
+ queueName := rs.Primary.Attributes["name"]
+ namespaceName := rs.Primary.Attributes["namespace_name"]
+ resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
+ if !hasResourceGroup {
+ return fmt.Errorf("Bad: no resource group found in state for queue: %s", queueName)
+ }
+
+ client := testAccProvider.Meta().(*ArmClient).serviceBusQueuesClient
+
+ resp, err := client.Get(resourceGroup, namespaceName, queueName)
+ if err != nil {
+ return fmt.Errorf("Bad: Get on serviceBusQueuesClient: %s", err)
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+			return fmt.Errorf("Bad: Queue %q (resource group: %q) does not exist", queueName, resourceGroup)
+ }
+
+ return nil
+ }
+}
+
+func testAccAzureRMServiceBusQueue_basic(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "acctestservicebusnamespace-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ sku = "standard"
+}
+
+resource "azurerm_servicebus_queue" "test" {
+ name = "acctestservicebusqueue-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ namespace_name = "${azurerm_servicebus_namespace.test.name}"
+}
+`, rInt, rInt, rInt)
+}
+
+func testAccAzureRMServiceBusQueue_Premium(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "acctestservicebusnamespace-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ sku = "premium"
+}
+
+resource "azurerm_servicebus_queue" "test" {
+ name = "acctestservicebusqueue-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ namespace_name = "${azurerm_servicebus_namespace.test.name}"
+ enable_partitioning = true
+ enable_express = false
+}
+`, rInt, rInt, rInt)
+}
+
+func testAccAzureRMServiceBusQueue_update(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "acctestservicebusnamespace-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ sku = "standard"
+}
+
+resource "azurerm_servicebus_queue" "test" {
+ name = "acctestservicebusqueue-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ namespace_name = "${azurerm_servicebus_namespace.test.name}"
+ enable_batched_operations = true
+ enable_express = true
+ max_size_in_megabytes = 2048
+}
+`, rInt, rInt, rInt)
+}
+
+func testAccAzureRMServiceBusQueue_enablePartitioningStandard(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "acctestservicebusnamespace-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ sku = "standard"
+}
+
+resource "azurerm_servicebus_queue" "test" {
+ name = "acctestservicebusqueue-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ namespace_name = "${azurerm_servicebus_namespace.test.name}"
+ enable_partitioning = true
+ max_size_in_megabytes = 5120
+}
+`, rInt, rInt, rInt)
+}
+
+func testAccAzureRMServiceBusQueue_enableDuplicateDetection(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "acctestservicebusnamespace-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ sku = "standard"
+}
+
+resource "azurerm_servicebus_queue" "test" {
+ name = "acctestservicebusqueue-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ namespace_name = "${azurerm_servicebus_namespace.test.name}"
+ requires_duplicate_detection = true
+}
+`, rInt, rInt, rInt)
+}
diff --git a/azurerm/resource_arm_servicebus_topic.go b/azurerm/resource_arm_servicebus_topic.go
index 982b8ea73cdf..cd2d7b69e631 100644
--- a/azurerm/resource_arm_servicebus_topic.go
+++ b/azurerm/resource_arm_servicebus_topic.go
@@ -7,6 +7,8 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/servicebus"
"github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/helper/validation"
+ "github.com/jen20/riviera/azure"
)
func resourceArmServiceBusTopic() *schema.Resource {
@@ -40,6 +42,17 @@ func resourceArmServiceBusTopic() *schema.Resource {
ForceNew: true,
},
+ "status": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: string(servicebus.EntityStatusActive),
+ ValidateFunc: validation.StringInSlice([]string{
+ string(servicebus.EntityStatusActive),
+ string(servicebus.EntityStatusDisabled),
+ }, true),
+ DiffSuppressFunc: ignoreCaseDiffSuppressFunc,
+ },
+
"auto_delete_on_idle": {
Type: schema.TypeString,
Optional: true,
@@ -107,41 +120,43 @@ func resourceArmServiceBusTopicCreate(d *schema.ResourceData, meta interface{})
namespaceName := d.Get("namespace_name").(string)
location := d.Get("location").(string)
resGroup := d.Get("resource_group_name").(string)
+ status := d.Get("status").(string)
+
+ enableBatchedOps := d.Get("enable_batched_operations").(bool)
+ enableExpress := d.Get("enable_express").(bool)
+ enableFiltering := d.Get("enable_filtering_messages_before_publishing").(bool)
+ enablePartitioning := d.Get("enable_partitioning").(bool)
+ maxSize := int64(d.Get("max_size_in_megabytes").(int))
+ requiresDuplicateDetection := d.Get("requires_duplicate_detection").(bool)
+ supportOrdering := d.Get("support_ordering").(bool)
parameters := servicebus.TopicCreateOrUpdateParameters{
- Name: &name,
- Location: &location,
- TopicProperties: &servicebus.TopicProperties{},
+ Name: &name,
+ Location: &location,
+ TopicProperties: &servicebus.TopicProperties{
+ Status: servicebus.EntityStatus(status),
+ EnableBatchedOperations: azure.Bool(enableBatchedOps),
+ EnableExpress: azure.Bool(enableExpress),
+ FilteringMessagesBeforePublishing: azure.Bool(enableFiltering),
+ EnablePartitioning: azure.Bool(enablePartitioning),
+ MaxSizeInMegabytes: azure.Int64(maxSize),
+ RequiresDuplicateDetection: azure.Bool(requiresDuplicateDetection),
+ SupportOrdering: azure.Bool(supportOrdering),
+ },
}
if autoDeleteOnIdle := d.Get("auto_delete_on_idle").(string); autoDeleteOnIdle != "" {
- parameters.TopicProperties.AutoDeleteOnIdle = &autoDeleteOnIdle
+ parameters.TopicProperties.AutoDeleteOnIdle = azure.String(autoDeleteOnIdle)
}
if defaultTTL := d.Get("default_message_ttl").(string); defaultTTL != "" {
- parameters.TopicProperties.DefaultMessageTimeToLive = &defaultTTL
+ parameters.TopicProperties.DefaultMessageTimeToLive = azure.String(defaultTTL)
}
if duplicateWindow := d.Get("duplicate_detection_history_time_window").(string); duplicateWindow != "" {
- parameters.TopicProperties.DuplicateDetectionHistoryTimeWindow = &duplicateWindow
+ parameters.TopicProperties.DuplicateDetectionHistoryTimeWindow = azure.String(duplicateWindow)
}
- enableBatchedOps := d.Get("enable_batched_operations").(bool)
- enableExpress := d.Get("enable_express").(bool)
- enableFiltering := d.Get("enable_filtering_messages_before_publishing").(bool)
- enablePartitioning := d.Get("enable_partitioning").(bool)
- maxSize := int64(d.Get("max_size_in_megabytes").(int))
- requiresDuplicateDetection := d.Get("requires_duplicate_detection").(bool)
- supportOrdering := d.Get("support_ordering").(bool)
-
- parameters.TopicProperties.EnableBatchedOperations = &enableBatchedOps
- parameters.TopicProperties.EnableExpress = &enableExpress
- parameters.TopicProperties.FilteringMessagesBeforePublishing = &enableFiltering
- parameters.TopicProperties.EnablePartitioning = &enablePartitioning
- parameters.TopicProperties.MaxSizeInMegabytes = &maxSize
- parameters.TopicProperties.RequiresDuplicateDetection = &requiresDuplicateDetection
- parameters.TopicProperties.SupportOrdering = &supportOrdering
-
_, err := client.CreateOrUpdate(resGroup, namespaceName, name, parameters)
if err != nil {
return err
@@ -186,6 +201,7 @@ func resourceArmServiceBusTopicRead(d *schema.ResourceData, meta interface{}) er
d.Set("location", azureRMNormalizeLocation(*resp.Location))
props := resp.TopicProperties
+ d.Set("status", string(props.Status))
d.Set("auto_delete_on_idle", props.AutoDeleteOnIdle)
d.Set("default_message_ttl", props.DefaultMessageTimeToLive)
diff --git a/azurerm/resource_arm_servicebus_topic_test.go b/azurerm/resource_arm_servicebus_topic_test.go
index 8ea9fd9ddbee..096467787a6b 100644
--- a/azurerm/resource_arm_servicebus_topic_test.go
+++ b/azurerm/resource_arm_servicebus_topic_test.go
@@ -11,18 +11,72 @@ import (
)
func TestAccAzureRMServiceBusTopic_basic(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
ri := acctest.RandInt()
- config := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri)
+ config := testAccAzureRMServiceBusTopic_basic(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"),
+ testCheckAzureRMServiceBusTopicExists(resourceName),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusTopic_basicDisabled(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
+ ri := acctest.RandInt()
+ config := testAccAzureRMServiceBusTopic_basicDisabled(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusTopicExists(resourceName),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMServiceBusTopic_basicDisableEnable(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
+ ri := acctest.RandInt()
+ enabledConfig := testAccAzureRMServiceBusTopic_basic(ri)
+ disabledConfig := testAccAzureRMServiceBusTopic_basicDisabled(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: enabledConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusTopicExists(resourceName),
+ ),
+ },
+ {
+ Config: disabledConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusTopicExists(resourceName),
+ ),
+ },
+ {
+ Config: enabledConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMServiceBusTopicExists(resourceName),
),
},
},
@@ -31,21 +85,21 @@ func TestAccAzureRMServiceBusTopic_basic(t *testing.T) {
func TestAccAzureRMServiceBusTopic_update(t *testing.T) {
ri := acctest.RandInt()
- preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri)
- postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_update, ri, ri, ri)
+ preConfig := testAccAzureRMServiceBusTopic_basic(ri)
+ postConfig := testAccAzureRMServiceBusTopic_update(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"),
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
@@ -59,29 +113,28 @@ func TestAccAzureRMServiceBusTopic_update(t *testing.T) {
}
func TestAccAzureRMServiceBusTopic_enablePartitioningStandard(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
ri := acctest.RandInt()
- preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri)
- postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_enablePartitioningStandard, ri, ri, ri)
+ preConfig := testAccAzureRMServiceBusTopic_basic(ri)
+ postConfig := testAccAzureRMServiceBusTopic_enablePartitioningStandard(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"),
+ testCheckAzureRMServiceBusTopicExists(resourceName),
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(
- "azurerm_servicebus_topic.test", "enable_partitioning", "true"),
+ resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "true"),
// Ensure size is read back in it's original value and not the x16 value returned by Azure
- resource.TestCheckResourceAttr(
- "azurerm_servicebus_topic.test", "max_size_in_megabytes", "5120"),
+ resource.TestCheckResourceAttr(resourceName, "max_size_in_megabytes", "5120"),
),
},
},
@@ -89,28 +142,27 @@ func TestAccAzureRMServiceBusTopic_enablePartitioningStandard(t *testing.T) {
}
func TestAccAzureRMServiceBusTopic_enablePartitioningPremium(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
ri := acctest.RandInt()
- preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri)
- postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_enablePartitioningPremium, ri, ri, ri)
+ preConfig := testAccAzureRMServiceBusTopic_basic(ri)
+ postConfig := testAccAzureRMServiceBusTopic_enablePartitioningPremium(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"),
+ testCheckAzureRMServiceBusTopicExists(resourceName),
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(
- "azurerm_servicebus_topic.test", "enable_partitioning", "true"),
- resource.TestCheckResourceAttr(
- "azurerm_servicebus_topic.test", "max_size_in_megabytes", "81920"),
+ resource.TestCheckResourceAttr(resourceName, "enable_partitioning", "true"),
+ resource.TestCheckResourceAttr(resourceName, "max_size_in_megabytes", "81920"),
),
},
},
@@ -118,26 +170,26 @@ func TestAccAzureRMServiceBusTopic_enablePartitioningPremium(t *testing.T) {
}
func TestAccAzureRMServiceBusTopic_enableDuplicateDetection(t *testing.T) {
+ resourceName := "azurerm_servicebus_topic.test"
ri := acctest.RandInt()
- preConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_basic, ri, ri, ri)
- postConfig := fmt.Sprintf(testAccAzureRMServiceBusTopic_enableDuplicateDetection, ri, ri, ri)
+ preConfig := testAccAzureRMServiceBusTopic_basic(ri)
+ postConfig := testAccAzureRMServiceBusTopic_enableDuplicateDetection(ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMServiceBusTopicDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
- testCheckAzureRMServiceBusTopicExists("azurerm_servicebus_topic.test"),
+ testCheckAzureRMServiceBusTopicExists(resourceName),
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(
- "azurerm_servicebus_topic.test", "requires_duplicate_detection", "true"),
+ resource.TestCheckResourceAttr(resourceName, "requires_duplicate_detection", "true"),
),
},
},
@@ -202,7 +254,8 @@ func testCheckAzureRMServiceBusTopicExists(name string) resource.TestCheckFunc {
}
}
-var testAccAzureRMServiceBusTopic_basic = `
+func testAccAzureRMServiceBusTopic_basic(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -221,9 +274,11 @@ resource "azurerm_servicebus_topic" "test" {
namespace_name = "${azurerm_servicebus_namespace.test.name}"
resource_group_name = "${azurerm_resource_group.test.name}"
}
-`
+`, rInt, rInt, rInt)
+}
-var testAccAzureRMServiceBusTopic_basicPremium = `
+func testAccAzureRMServiceBusTopic_basicDisabled(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -233,7 +288,7 @@ resource "azurerm_servicebus_namespace" "test" {
name = "acctestservicebusnamespace-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "premium"
+ sku = "standard"
}
resource "azurerm_servicebus_topic" "test" {
@@ -241,10 +296,13 @@ resource "azurerm_servicebus_topic" "test" {
location = "West US"
namespace_name = "${azurerm_servicebus_namespace.test.name}"
resource_group_name = "${azurerm_resource_group.test.name}"
+ status = "disabled"
+}
+`, rInt, rInt, rInt)
}
-`
-var testAccAzureRMServiceBusTopic_update = `
+func testAccAzureRMServiceBusTopic_update(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -265,9 +323,11 @@ resource "azurerm_servicebus_topic" "test" {
enable_batched_operations = true
enable_express = true
}
-`
+`, rInt, rInt, rInt)
+}
-var testAccAzureRMServiceBusTopic_enablePartitioningStandard = `
+func testAccAzureRMServiceBusTopic_enablePartitioningStandard(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -286,11 +346,13 @@ resource "azurerm_servicebus_topic" "test" {
namespace_name = "${azurerm_servicebus_namespace.test.name}"
resource_group_name = "${azurerm_resource_group.test.name}"
enable_partitioning = true
- max_size_in_megabytes = 5120
+ max_size_in_megabytes = 5120
+}
+`, rInt, rInt, rInt)
}
-`
-var testAccAzureRMServiceBusTopic_enablePartitioningPremium = `
+func testAccAzureRMServiceBusTopic_enablePartitioningPremium(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -309,11 +371,13 @@ resource "azurerm_servicebus_topic" "test" {
namespace_name = "${azurerm_servicebus_namespace.test.name}"
resource_group_name = "${azurerm_resource_group.test.name}"
enable_partitioning = true
- max_size_in_megabytes = 81920
+ max_size_in_megabytes = 81920
+}
+`, rInt, rInt, rInt)
}
-`
-var testAccAzureRMServiceBusTopic_enableDuplicateDetection = `
+func testAccAzureRMServiceBusTopic_enableDuplicateDetection(rInt int) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "West US"
@@ -333,4 +397,5 @@ resource "azurerm_servicebus_topic" "test" {
resource_group_name = "${azurerm_resource_group.test.name}"
requires_duplicate_detection = true
}
-`
+`, rInt, rInt, rInt)
+}
diff --git a/azurerm/resource_arm_storage_account.go b/azurerm/resource_arm_storage_account.go
index ffb4e14ee48b..46ae820f59c0 100644
--- a/azurerm/resource_arm_storage_account.go
+++ b/azurerm/resource_arm_storage_account.go
@@ -137,6 +137,16 @@ func resourceArmStorageAccount() *schema.Resource {
Computed: true,
},
+ "primary_blob_connection_string": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "secondary_blob_connection_string": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
"tags": tagsSchema(),
},
}
@@ -351,7 +361,7 @@ func resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) err
d.Set("resource_group_name", resGroup)
d.Set("primary_access_key", accessKeys[0].Value)
d.Set("secondary_access_key", accessKeys[1].Value)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
d.Set("account_kind", resp.Kind)
d.Set("account_type", resp.Sku.Name)
d.Set("primary_location", resp.AccountProperties.PrimaryLocation)
@@ -366,13 +376,21 @@ func resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) err
d.Set("primary_queue_endpoint", resp.AccountProperties.PrimaryEndpoints.Queue)
d.Set("primary_table_endpoint", resp.AccountProperties.PrimaryEndpoints.Table)
d.Set("primary_file_endpoint", resp.AccountProperties.PrimaryEndpoints.File)
+
+ pscs := fmt.Sprintf("DefaultEndpointsProtocol=https;BlobEndpoint=%s;AccountName=%s;AccountKey=%s",
+ *resp.AccountProperties.PrimaryEndpoints.Blob, *resp.Name, *accessKeys[0].Value)
+ d.Set("primary_blob_connection_string", pscs)
}
if resp.AccountProperties.SecondaryEndpoints != nil {
if resp.AccountProperties.SecondaryEndpoints.Blob != nil {
d.Set("secondary_blob_endpoint", resp.AccountProperties.SecondaryEndpoints.Blob)
+ sscs := fmt.Sprintf("DefaultEndpointsProtocol=https;BlobEndpoint=%s;AccountName=%s;AccountKey=%s",
+ *resp.AccountProperties.SecondaryEndpoints.Blob, *resp.Name, *accessKeys[1].Value)
+ d.Set("secondary_blob_connection_string", sscs)
} else {
d.Set("secondary_blob_endpoint", "")
+ d.Set("secondary_blob_connection_string", "")
}
if resp.AccountProperties.SecondaryEndpoints.Queue != nil {
d.Set("secondary_queue_endpoint", resp.AccountProperties.SecondaryEndpoints.Queue)
diff --git a/azurerm/resource_arm_storage_account_test.go b/azurerm/resource_arm_storage_account_test.go
index 6599a70581f2..09a324c08352 100644
--- a/azurerm/resource_arm_storage_account_test.go
+++ b/azurerm/resource_arm_storage_account_test.go
@@ -53,15 +53,15 @@ func TestValidateArmStorageAccountName(t *testing.T) {
func TestAccAzureRMStorageAccount_basic(t *testing.T) {
ri := acctest.RandInt()
rs := acctest.RandString(4)
- preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_basic, ri, rs)
- postConfig := fmt.Sprintf(testAccAzureRMStorageAccount_update, ri, rs)
+ preConfig := testAccAzureRMStorageAccount_basic(ri, rs)
+ postConfig := testAccAzureRMStorageAccount_update(ri, rs)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMStorageAccountDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -71,7 +71,7 @@ func TestAccAzureRMStorageAccount_basic(t *testing.T) {
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -87,14 +87,14 @@ func TestAccAzureRMStorageAccount_basic(t *testing.T) {
func TestAccAzureRMStorageAccount_disappears(t *testing.T) {
ri := acctest.RandInt()
rs := acctest.RandString(4)
- preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_basic, ri, rs)
+ preConfig := testAccAzureRMStorageAccount_basic(ri, rs)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMStorageAccountDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -109,18 +109,39 @@ func TestAccAzureRMStorageAccount_disappears(t *testing.T) {
})
}
+func TestAccAzureRMStorageAccount_blobConnectionString(t *testing.T) {
+ ri := acctest.RandInt()
+ rs := acctest.RandString(4)
+ preConfig := testAccAzureRMStorageAccount_basic(ri, rs)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMStorageAccountDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: preConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
+ resource.TestCheckResourceAttrSet("azurerm_storage_account.testsa", "primary_blob_connection_string"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAzureRMStorageAccount_blobEncryption(t *testing.T) {
ri := acctest.RandInt()
rs := acctest.RandString(4)
- preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobEncryption, ri, rs)
- postConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobEncryptionDisabled, ri, rs)
+ preConfig := testAccAzureRMStorageAccount_blobEncryption(ri, rs)
+ postConfig := testAccAzureRMStorageAccount_blobEncryptionDisabled(ri, rs)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMStorageAccountDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -128,7 +149,7 @@ func TestAccAzureRMStorageAccount_blobEncryption(t *testing.T) {
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -142,15 +163,15 @@ func TestAccAzureRMStorageAccount_blobEncryption(t *testing.T) {
func TestAccAzureRMStorageAccount_blobStorageWithUpdate(t *testing.T) {
ri := acctest.RandInt()
rs := acctest.RandString(4)
- preConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobStorage, ri, rs)
- postConfig := fmt.Sprintf(testAccAzureRMStorageAccount_blobStorageUpdate, ri, rs)
+ preConfig := testAccAzureRMStorageAccount_blobStorage(ri, rs)
+ postConfig := testAccAzureRMStorageAccount_blobStorageUpdate(ri, rs)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMStorageAccountDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -159,7 +180,7 @@ func TestAccAzureRMStorageAccount_blobStorageWithUpdate(t *testing.T) {
),
},
- resource.TestStep{
+ {
Config: postConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
@@ -180,14 +201,14 @@ func TestAccAzureRMStorageAccount_NonStandardCasing(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMStorageAccountDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: preConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMStorageAccountExists("azurerm_storage_account.testsa"),
),
},
- resource.TestStep{
+ {
Config: preConfig,
PlanOnly: true,
ExpectNonEmptyPlan: false,
@@ -270,7 +291,8 @@ func testCheckAzureRMStorageAccountDestroy(s *terraform.State) error {
return nil
}
-var testAccAzureRMStorageAccount_basic = `
+func testAccAzureRMStorageAccount_basic(rInt int, rString string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "testrg" {
name = "testAccAzureRMSA-%d"
location = "westus"
@@ -286,9 +308,11 @@ resource "azurerm_storage_account" "testsa" {
tags {
environment = "production"
}
-}`
+}`, rInt, rString)
+}
-var testAccAzureRMStorageAccount_update = `
+func testAccAzureRMStorageAccount_update(rInt int, rString string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "testrg" {
name = "testAccAzureRMSA-%d"
location = "westus"
@@ -304,9 +328,11 @@ resource "azurerm_storage_account" "testsa" {
tags {
environment = "staging"
}
-}`
+}`, rInt, rString)
+}
-var testAccAzureRMStorageAccount_blobEncryption = `
+func testAccAzureRMStorageAccount_blobEncryption(rInt int, rString string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "testrg" {
name = "testAccAzureRMSA-%d"
location = "westus"
@@ -323,9 +349,11 @@ resource "azurerm_storage_account" "testsa" {
tags {
environment = "production"
}
-}`
+}`, rInt, rString)
+}
-var testAccAzureRMStorageAccount_blobEncryptionDisabled = `
+func testAccAzureRMStorageAccount_blobEncryptionDisabled(rInt int, rString string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "testrg" {
name = "testAccAzureRMSA-%d"
location = "westus"
@@ -342,10 +370,12 @@ resource "azurerm_storage_account" "testsa" {
tags {
environment = "production"
}
-}`
+}`, rInt, rString)
+}
// BlobStorage accounts are not available in WestUS
-var testAccAzureRMStorageAccount_blobStorage = `
+func testAccAzureRMStorageAccount_blobStorage(rInt int, rString string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "testrg" {
name = "testAccAzureRMSA-%d"
location = "northeurope"
@@ -362,9 +392,12 @@ resource "azurerm_storage_account" "testsa" {
tags {
environment = "production"
}
-}`
+}
+`, rInt, rString)
+}
-var testAccAzureRMStorageAccount_blobStorageUpdate = `
+func testAccAzureRMStorageAccount_blobStorageUpdate(rInt int, rString string) string {
+ return fmt.Sprintf(`
resource "azurerm_resource_group" "testrg" {
name = "testAccAzureRMSA-%d"
location = "northeurope"
@@ -382,7 +415,9 @@ resource "azurerm_storage_account" "testsa" {
tags {
environment = "production"
}
-}`
+}
+`, rInt, rString)
+}
func testAccAzureRMStorageAccountNonStandardCasing(ri int, rs string) string {
return fmt.Sprintf(`
diff --git a/azurerm/resource_arm_storage_table.go b/azurerm/resource_arm_storage_table.go
index 3db39165a93e..f82ec1d8338c 100644
--- a/azurerm/resource_arm_storage_table.go
+++ b/azurerm/resource_arm_storage_table.go
@@ -43,9 +43,9 @@ func validateArmStorageTableName(v interface{}, k string) (ws []string, errors [
"Table Storage %q cannot use the word `table`: %q",
k, value))
}
- if !regexp.MustCompile(`^[A-Za-z][A-Za-z0-9]{6,63}$`).MatchString(value) {
+ if !regexp.MustCompile(`^[A-Za-z][A-Za-z0-9]{2,62}$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
- "Table Storage %q cannot begin with a numeric character, only alphanumeric characters are allowed and must be between 6 and 63 characters long: %q",
+ "Table Storage %q cannot begin with a numeric character, only alphanumeric characters are allowed and must be between 3 and 63 characters long: %q",
k, value))
}
diff --git a/azurerm/resource_arm_storage_table_test.go b/azurerm/resource_arm_storage_table_test.go
index 33cc095aa738..f3b99f5441ec 100644
--- a/azurerm/resource_arm_storage_table_test.go
+++ b/azurerm/resource_arm_storage_table_test.go
@@ -192,6 +192,8 @@ func TestValidateArmStorageTableName(t *testing.T) {
"mytable",
"myTable",
"MYTABLE",
+ "tbl",
+ strings.Repeat("w", 63),
}
for _, v := range validNames {
_, errors := validateArmStorageTableName(v, "name")
@@ -206,7 +208,7 @@ func TestValidateArmStorageTableName(t *testing.T) {
"invalid_name",
"invalid!",
"ww",
- strings.Repeat("w", 65),
+ strings.Repeat("w", 64),
}
for _, v := range invalidNames {
_, errors := validateArmStorageTableName(v, "name")
diff --git a/azurerm/resource_arm_subnet.go b/azurerm/resource_arm_subnet.go
index 044c6d4f1e0c..b991b4a1a40f 100644
--- a/azurerm/resource_arm_subnet.go
+++ b/azurerm/resource_arm_subnet.go
@@ -9,6 +9,8 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+var subnetResourceName = "azurerm_subnet"
+
func resourceArmSubnet() *schema.Resource {
return &schema.Resource{
Create: resourceArmSubnetCreate,
@@ -77,11 +79,8 @@ func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error {
resGroup := d.Get("resource_group_name").(string)
addressPrefix := d.Get("address_prefix").(string)
- armMutexKV.Lock(name)
- defer armMutexKV.Unlock(name)
-
- armMutexKV.Lock(vnetName)
- defer armMutexKV.Unlock(vnetName)
+ azureRMLockByName(vnetName, virtualNetworkResourceName)
+ defer azureRMUnlockByName(vnetName, virtualNetworkResourceName)
properties := network.SubnetPropertiesFormat{
AddressPrefix: &addressPrefix,
@@ -98,8 +97,8 @@ func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
- armMutexKV.Lock(networkSecurityGroupName)
- defer armMutexKV.Unlock(networkSecurityGroupName)
+ azureRMLockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
}
if v, ok := d.GetOk("route_table_id"); ok {
@@ -113,8 +112,8 @@ func resourceArmSubnetCreate(d *schema.ResourceData, meta interface{}) error {
return err
}
- armMutexKV.Lock(routeTableName)
- defer armMutexKV.Unlock(routeTableName)
+ azureRMLockByName(routeTableName, routeTableResourceName)
+ defer azureRMUnlockByName(routeTableName, routeTableResourceName)
}
subnet := network.Subnet{
@@ -169,10 +168,14 @@ func resourceArmSubnetRead(d *schema.ResourceData, meta interface{}) error {
if resp.SubnetPropertiesFormat.NetworkSecurityGroup != nil {
d.Set("network_security_group_id", resp.SubnetPropertiesFormat.NetworkSecurityGroup.ID)
+ } else {
+ d.Set("network_security_group_id", "")
}
if resp.SubnetPropertiesFormat.RouteTable != nil {
d.Set("route_table_id", resp.SubnetPropertiesFormat.RouteTable.ID)
+ } else {
+ d.Set("route_table_id", "")
}
if resp.SubnetPropertiesFormat.IPConfigurations != nil {
@@ -209,8 +212,8 @@ func resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error {
return err
}
- armMutexKV.Lock(networkSecurityGroupName)
- defer armMutexKV.Unlock(networkSecurityGroupName)
+ azureRMLockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
+ defer azureRMUnlockByName(networkSecurityGroupName, networkSecurityGroupResourceName)
}
if v, ok := d.GetOk("route_table_id"); ok {
@@ -220,15 +223,15 @@ func resourceArmSubnetDelete(d *schema.ResourceData, meta interface{}) error {
return err
}
- armMutexKV.Lock(routeTableName)
- defer armMutexKV.Unlock(routeTableName)
+ azureRMLockByName(routeTableName, routeTableResourceName)
+ defer azureRMUnlockByName(routeTableName, routeTableResourceName)
}
- armMutexKV.Lock(vnetName)
- defer armMutexKV.Unlock(vnetName)
+ azureRMLockByName(vnetName, virtualNetworkResourceName)
+ defer azureRMUnlockByName(vnetName, virtualNetworkResourceName)
- armMutexKV.Lock(name)
- defer armMutexKV.Unlock(name)
+ azureRMLockByName(name, subnetResourceName)
+ defer azureRMUnlockByName(name, subnetResourceName)
_, error := subnetClient.Delete(resGroup, vnetName, name, make(chan struct{}))
err = <-error
diff --git a/azurerm/resource_arm_subnet_test.go b/azurerm/resource_arm_subnet_test.go
index 06d8ba473af8..3100f8a8e2cb 100644
--- a/azurerm/resource_arm_subnet_test.go
+++ b/azurerm/resource_arm_subnet_test.go
@@ -22,7 +22,7 @@ func TestAccAzureRMSubnet_basic(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSubnetDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSubnetExists("azurerm_subnet.test"),
@@ -43,14 +43,14 @@ func TestAccAzureRMSubnet_routeTableUpdate(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSubnetDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: initConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSubnetExists("azurerm_subnet.test"),
),
},
- resource.TestStep{
+ {
Config: updatedConfig,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSubnetRouteTableExists("azurerm_subnet.test", fmt.Sprintf("acctest-%d", ri)),
@@ -60,6 +60,45 @@ func TestAccAzureRMSubnet_routeTableUpdate(t *testing.T) {
})
}
+func TestAccAzureRMSubnet_bug7986(t *testing.T) {
+ ri := acctest.RandInt()
+ initConfig := testAccAzureRMSubnet_bug7986(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMSubnetDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: initConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMSubnetExists("azurerm_subnet.first"),
+ testCheckAzureRMSubnetExists("azurerm_subnet.second"),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccAzureRMSubnet_bug15204(t *testing.T) {
+ ri := acctest.RandInt()
+ initConfig := testAccAzureRMSubnet_bug15204(ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMSubnetDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: initConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMSubnetExists("azurerm_subnet.test"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAzureRMSubnet_disappears(t *testing.T) {
ri := acctest.RandInt()
@@ -70,7 +109,7 @@ func TestAccAzureRMSubnet_disappears(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMSubnetDestroy,
Steps: []resource.TestStep{
- resource.TestStep{
+ {
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMSubnetExists("azurerm_subnet.test"),
@@ -369,3 +408,99 @@ resource "azurerm_route" "route_a" {
next_hop_in_ip_address = "10.10.1.1"
}`, rInt, rInt, rInt, rInt, rInt, rInt, rInt)
}
+
+func testAccAzureRMSubnet_bug7986(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctest%d-rg"
+ location = "West Europe"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acctest%d-vn"
+ address_space = ["10.0.0.0/16"]
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_route_table" "first" {
+ name = "acctest%d-private-1"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_route" "first" {
+ name = "acctest%d-private-1"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ route_table_name = "${azurerm_route_table.first.name}"
+ address_prefix = "0.0.0.0/0"
+ next_hop_type = "None"
+}
+
+resource "azurerm_subnet" "first" {
+ name = "acctest%d-private-1"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.0.0/24"
+ route_table_id = "${azurerm_route_table.first.id}"
+}
+
+resource "azurerm_route_table" "second" {
+ name = "acctest%d-private-2"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_route" "second" {
+ name = "acctest%d-private-2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ route_table_name = "${azurerm_route_table.second.name}"
+ address_prefix = "0.0.0.0/0"
+ next_hop_type = "None"
+}
+
+resource "azurerm_subnet" "second" {
+ name = "acctest%d-private-2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.1.0/24"
+ route_table_id = "${azurerm_route_table.second.id}"
+}`, rInt, rInt, rInt, rInt, rInt, rInt, rInt, rInt)
+}
+
+func testAccAzureRMSubnet_bug15204(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctest-%d"
+ location = "West Europe"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acctestvn-%d"
+ address_space = ["10.85.0.0/16"]
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_network_security_group" "test" {
+ name = "acctestnsg-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_route_table" "test" {
+ name = "acctestrt-%d"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_subnet" "test" {
+ name = "acctestsubnet-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.85.9.0/24"
+ route_table_id = "${azurerm_route_table.test.id}"
+ network_security_group_id = "${azurerm_network_security_group.test.id}"
+}
+`, rInt, rInt, rInt, rInt, rInt)
+}
diff --git a/azurerm/resource_arm_traffic_manager_endpoint.go b/azurerm/resource_arm_traffic_manager_endpoint.go
index 062f04e9f961..0936ce828ec1 100644
--- a/azurerm/resource_arm_traffic_manager_endpoint.go
+++ b/azurerm/resource_arm_traffic_manager_endpoint.go
@@ -73,12 +73,13 @@ func resourceArmTrafficManagerEndpoint() *schema.Resource {
ValidateFunc: validation.IntBetween(1, 1000),
},
+ // when targeting an Azure resource the location of that resource will be set on the endpoint
"endpoint_location": {
- Type: schema.TypeString,
- Optional: true,
- // when targeting an Azure resource the location of that resource will be set on the endpoint
- Computed: true,
- StateFunc: azureRMNormalizeLocation,
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ StateFunc: azureRMNormalizeLocation,
+ DiffSuppressFunc: azureRMSuppressLocationDiff,
},
"min_child_endpoints": {
diff --git a/azurerm/resource_arm_traffic_manager_endpoint_test.go b/azurerm/resource_arm_traffic_manager_endpoint_test.go
index a4051ec1c035..cb1e82466066 100644
--- a/azurerm/resource_arm_traffic_manager_endpoint_test.go
+++ b/azurerm/resource_arm_traffic_manager_endpoint_test.go
@@ -175,6 +175,32 @@ func TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints(t *testing.T) {
})
}
+func TestAccAzureRMTrafficManagerEndpoint_location(t *testing.T) {
+ ri := acctest.RandInt()
+ first := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_location, ri, ri, ri, ri)
+ second := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_locationUpdated, ri, ri, ri, ri)
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: first,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.test"),
+ ),
+ },
+ {
+ Config: second,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.test"),
+ ),
+ },
+ },
+ })
+}
+
func testCheckAzureRMTrafficManagerEndpointExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
@@ -589,3 +615,69 @@ resource "azurerm_traffic_manager_endpoint" "externalChild" {
resource_group_name = "${azurerm_resource_group.test.name}"
}
`
+
+var testAccAzureRMTrafficManagerEndpoint_location = `
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US"
+}
+
+resource "azurerm_traffic_manager_profile" "test" {
+ name = "acctesttmpparent%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ traffic_routing_method = "Performance"
+
+ dns_config {
+ relative_name = "acctestparent%d"
+ ttl = 30
+ }
+
+ monitor_config {
+ protocol = "https"
+ port = 443
+ path = "/"
+ }
+}
+
+resource "azurerm_traffic_manager_endpoint" "test" {
+ name = "acctestend-external%d"
+ type = "externalEndpoints"
+ target = "terraform.io"
+ endpoint_location = "${azurerm_resource_group.test.location}"
+ profile_name = "${azurerm_traffic_manager_profile.test.name}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+`
+
+var testAccAzureRMTrafficManagerEndpoint_locationUpdated = `
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "westus"
+}
+
+resource "azurerm_traffic_manager_profile" "test" {
+ name = "acctesttmpparent%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ traffic_routing_method = "Performance"
+
+ dns_config {
+ relative_name = "acctestparent%d"
+ ttl = 30
+ }
+
+ monitor_config {
+ protocol = "https"
+ port = 443
+ path = "/"
+ }
+}
+
+resource "azurerm_traffic_manager_endpoint" "test" {
+ name = "acctestend-external%d"
+ type = "externalEndpoints"
+ target = "terraform.io"
+ endpoint_location = "${azurerm_resource_group.test.location}"
+ profile_name = "${azurerm_traffic_manager_profile.test.name}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+`
diff --git a/azurerm/resource_arm_virtual_machine.go b/azurerm/resource_arm_virtual_machine.go
index 8e0f56b24ec2..bf5c4dfdf632 100644
--- a/azurerm/resource_arm_virtual_machine.go
+++ b/azurerm/resource_arm_virtual_machine.go
@@ -333,8 +333,9 @@ func resourceArmVirtualMachine() *schema.Resource {
},
"admin_password": {
- Type: schema.TypeString,
- Required: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Sensitive: true,
},
"custom_data": {
@@ -628,7 +629,7 @@ func resourceArmVirtualMachineRead(d *schema.ResourceData, meta interface{}) err
d.Set("name", resp.Name)
d.Set("resource_group_name", resGroup)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
if resp.Plan != nil {
if err := d.Set("plan", schema.NewSet(resourceArmVirtualMachinePlanHash, flattenAzureRmVirtualMachinePlan(resp.Plan))); err != nil {
diff --git a/azurerm/resource_arm_virtual_machine_scale_set.go b/azurerm/resource_arm_virtual_machine_scale_set.go
index a48cb0cc1014..13e3fb25003e 100644
--- a/azurerm/resource_arm_virtual_machine_scale_set.go
+++ b/azurerm/resource_arm_virtual_machine_scale_set.go
@@ -299,7 +299,7 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource {
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
- Required: true,
+ Optional: true,
},
"image": {
@@ -1182,6 +1182,10 @@ func expandAzureRMVirtualMachineScaleSetsStorageProfileOsDisk(d *schema.Resource
createOption := osDiskConfig["create_option"].(string)
managedDiskType := osDiskConfig["managed_disk_type"].(string)
+ if managedDiskType == "" && name == "" {
+ return nil, fmt.Errorf("[ERROR] `name` must be set in `storage_profile_os_disk` for unmanaged disk")
+ }
+
osDisk := &compute.VirtualMachineScaleSetOSDisk{
Name: &name,
Caching: compute.CachingTypes(caching),
@@ -1207,13 +1211,13 @@ func expandAzureRMVirtualMachineScaleSetsStorageProfileOsDisk(d *schema.Resource
managedDisk := &compute.VirtualMachineScaleSetManagedDiskParameters{}
if managedDiskType != "" {
- if name == "" {
- osDisk.Name = nil
- managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType)
- osDisk.ManagedDisk = managedDisk
- } else {
- return nil, fmt.Errorf("[ERROR] Conflict between `name` and `managed_disk_type` on `storage_profile_os_disk` (please set name to blank)")
+ if name != "" {
+ return nil, fmt.Errorf("[ERROR] Conflict between `name` and `managed_disk_type` on `storage_profile_os_disk` (please remove name or set it to blank)")
}
+
+ osDisk.Name = nil
+ managedDisk.StorageAccountType = compute.StorageAccountTypes(managedDiskType)
+ osDisk.ManagedDisk = managedDisk
}
//BEGIN: code to be removed after GH-13016 is merged
diff --git a/azurerm/resource_arm_virtual_machine_scale_set_test.go b/azurerm/resource_arm_virtual_machine_scale_set_test.go
index 4687c39247d0..17a62edfbb96 100644
--- a/azurerm/resource_arm_virtual_machine_scale_set_test.go
+++ b/azurerm/resource_arm_virtual_machine_scale_set_test.go
@@ -97,6 +97,24 @@ func TestAccAzureRMVirtualMachineScaleSet_basicLinux_managedDisk(t *testing.T) {
})
}
+func TestAccAzureRMVirtualMachineScaleSet_basicLinux_managedDiskNoName(t *testing.T) {
+ ri := acctest.RandInt()
+ config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basicLinux_managedDiskNoName, ri, ri, ri, ri, ri, ri)
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMVirtualMachineScaleSetDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMVirtualMachineScaleSetExists("azurerm_virtual_machine_scale_set.test"),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAzureRMVirtualMachineScaleSet_basicLinux_disappears(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMVirtualMachineScaleSet_basic, ri, ri, ri, ri, ri, ri, ri, ri)
@@ -898,6 +916,68 @@ resource "azurerm_virtual_machine_scale_set" "test" {
}
`
+var testAccAzureRMVirtualMachineScaleSet_basicLinux_managedDiskNoName = `
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US 2"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acctvn-%d"
+ address_space = ["10.0.0.0/16"]
+ location = "West US 2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_subnet" "test" {
+ name = "acctsub-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.2.0/24"
+}
+
+resource "azurerm_virtual_machine_scale_set" "test" {
+ name = "acctvmss-%d"
+ location = "West US 2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ upgrade_policy_mode = "Manual"
+
+ sku {
+ name = "Standard_D1_v2"
+ tier = "Standard"
+ capacity = 2
+ }
+
+ os_profile {
+ computer_name_prefix = "testvm-%d"
+ admin_username = "myadmin"
+ admin_password = "Passwword1234"
+ }
+
+ network_profile {
+ name = "TestNetworkProfile-%d"
+ primary = true
+ ip_configuration {
+ name = "TestIPConfiguration"
+ subnet_id = "${azurerm_subnet.test.id}"
+ }
+ }
+
+ storage_profile_os_disk {
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "Standard_LRS"
+ }
+
+ storage_profile_image_reference {
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "16.04-LTS"
+ version = "latest"
+ }
+}
+`
+
var testAccAzureRMVirtualMachineScaleSetLoadbalancerTemplate = `
resource "azurerm_resource_group" "test" {
name = "acctestrg-%d"
diff --git a/azurerm/resource_arm_virtual_machine_test.go b/azurerm/resource_arm_virtual_machine_test.go
index 56291b3c88e3..df818c0a6a9b 100644
--- a/azurerm/resource_arm_virtual_machine_test.go
+++ b/azurerm/resource_arm_virtual_machine_test.go
@@ -34,6 +34,25 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) {
})
}
+func TestAccAzureRMVirtualMachine_basicLinuxMachineSSHOnly(t *testing.T) {
+ var vm compute.VirtualMachine
+ ri := acctest.RandInt()
+ config := testAccAzureRMVirtualMachine_basicLinuxMachineSSHOnly(ri)
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testCheckAzureRMVirtualMachineDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeTestCheckFunc(
+ testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_explicit(t *testing.T) {
var vm compute.VirtualMachine
ri := acctest.RandInt()
@@ -1005,6 +1024,100 @@ resource "azurerm_virtual_machine" "test" {
}
`
+func testAccAzureRMVirtualMachine_basicLinuxMachineSSHOnly(rInt int) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctestRG-%d"
+ location = "West US 2"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acctvn-%d"
+ address_space = ["10.0.0.0/16"]
+ location = "West US 2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_subnet" "test" {
+ name = "acctsub-%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.2.0/24"
+}
+
+resource "azurerm_network_interface" "test" {
+ name = "acctni-%d"
+ location = "West US 2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+
+ ip_configuration {
+ name = "testconfiguration1"
+ subnet_id = "${azurerm_subnet.test.id}"
+ private_ip_address_allocation = "dynamic"
+ }
+}
+
+resource "azurerm_storage_account" "test" {
+ name = "accsa%d"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "West US 2"
+ account_type = "Standard_LRS"
+
+ tags {
+ environment = "staging"
+ }
+}
+
+resource "azurerm_storage_container" "test" {
+ name = "vhds"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ storage_account_name = "${azurerm_storage_account.test.name}"
+ container_access_type = "private"
+}
+
+resource "azurerm_virtual_machine" "test" {
+ name = "acctvm-%d"
+ location = "West US 2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ network_interface_ids = ["${azurerm_network_interface.test.id}"]
+ vm_size = "Standard_D1_v2"
+
+ storage_image_reference {
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "14.04.2-LTS"
+ version = "latest"
+ }
+
+ storage_os_disk {
+ name = "myosdisk1"
+ vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ disk_size_gb = "45"
+ }
+
+ os_profile {
+ computer_name = "hn%d"
+ admin_username = "testadmin"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = true
+ ssh_keys {
+ path = "/home/testadmin/.ssh/authorized_keys"
+ key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCfGyt5W1eJVpDIxlyvAWO594j/azEGohmlxYe7mgSfmUCWjuzILI6nHuHbxhpBDIZJhQ+JAeduXpii61dmThbI89ghGMhzea0OlT3p12e093zqa4goB9g40jdNKmJArER3pMVqs6hmv8y3GlUNkMDSmuoyI8AYzX4n26cUKZbwXQ== mk@mk3"
+ }
+ }
+
+ tags {
+ environment = "Production"
+ cost-center = "Ops"
+ }
+}
+`, rInt, rInt, rInt, rInt, rInt, rInt, rInt)
+}
+
var testAccAzureRMVirtualMachine_basicLinuxMachine_managedDisk_explicit = `
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
diff --git a/azurerm/resource_arm_virtual_network.go b/azurerm/resource_arm_virtual_network.go
index 22118b80cd7e..e3cf34292d56 100644
--- a/azurerm/resource_arm_virtual_network.go
+++ b/azurerm/resource_arm_virtual_network.go
@@ -11,6 +11,8 @@ import (
"github.com/hashicorp/terraform/helper/schema"
)
+var virtualNetworkResourceName = "azurerm_virtual_network"
+
func resourceArmVirtualNetwork() *schema.Resource {
return &schema.Resource{
Create: resourceArmVirtualNetworkCreate,
@@ -114,8 +116,8 @@ func resourceArmVirtualNetworkCreate(d *schema.ResourceData, meta interface{}) e
}
}
- azureRMLockMultiple(&networkSecurityGroupNames)
- defer azureRMUnlockMultiple(&networkSecurityGroupNames)
+ azureRMLockMultipleByName(&networkSecurityGroupNames, networkSecurityGroupResourceName)
+ defer azureRMUnlockMultipleByName(&networkSecurityGroupNames, networkSecurityGroupResourceName)
_, error := vnetClient.CreateOrUpdate(resGroup, name, vnet, make(chan struct{}))
err := <-error
@@ -160,7 +162,7 @@ func resourceArmVirtualNetworkRead(d *schema.ResourceData, meta interface{}) err
// update appropriate values
d.Set("resource_group_name", resGroup)
d.Set("name", resp.Name)
- d.Set("location", resp.Location)
+ d.Set("location", azureRMNormalizeLocation(*resp.Location))
d.Set("address_space", vnet.AddressSpace.AddressPrefixes)
subnets := &schema.Set{
@@ -208,8 +210,8 @@ func resourceArmVirtualNetworkDelete(d *schema.ResourceData, meta interface{}) e
return fmt.Errorf("[ERROR] Error parsing Network Security Group ID's: %+v", err)
}
- azureRMLockMultiple(&nsgNames)
- defer azureRMUnlockMultiple(&nsgNames)
+ azureRMLockMultipleByName(&nsgNames, virtualNetworkResourceName)
+ defer azureRMUnlockMultipleByName(&nsgNames, virtualNetworkResourceName)
_, error := vnetClient.Delete(resGroup, name, make(chan struct{}))
err = <-error
diff --git a/azurerm/resourceid.go b/azurerm/resourceid.go
index bcf5eb45bcdd..5523b43f23bf 100644
--- a/azurerm/resourceid.go
+++ b/azurerm/resourceid.go
@@ -3,6 +3,7 @@ package azurerm
import (
"fmt"
"net/url"
+ "sort"
"strings"
)
@@ -115,7 +116,15 @@ func composeAzureResourceID(idObj *ResourceID) (id string, err error) {
id += fmt.Sprintf("/providers/%s", idObj.Provider)
- for k, v := range idObj.Path {
+ // sort the path keys so our output is deterministic
+ var pathKeys []string
+ for k := range idObj.Path {
+ pathKeys = append(pathKeys, k)
+ }
+ sort.Strings(pathKeys)
+
+ for _, k := range pathKeys {
+ v := idObj.Path[k]
if k == "" || v == "" {
return "", fmt.Errorf("ResourceID.Path cannot contain empty strings")
}
diff --git a/azurerm/resourceid_test.go b/azurerm/resourceid_test.go
index 69d64be932a8..6857f181811d 100644
--- a/azurerm/resourceid_test.go
+++ b/azurerm/resourceid_test.go
@@ -159,9 +159,14 @@ func TestComposeAzureResourceID(t *testing.T) {
"k1": "v1",
"k2": "v2",
"k3": "v3",
+ "k4": "v4",
+ "k5": "v5",
+ "k6": "v6",
+ "k7": "v7",
+ "k8": "v8",
},
},
- "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup1/providers/foo.bar/k1/v1/k2/v2/k3/v3",
+ "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/testGroup1/providers/foo.bar/k1/v1/k2/v2/k3/v3/k4/v4/k5/v5/k6/v6/k7/v7/k8/v8",
false,
},
{
diff --git a/examples/2-vms-loadbalancer-lbrules/README.md b/examples/2-vms-loadbalancer-lbrules/README.md
new file mode 100644
index 000000000000..5730f2ad951e
--- /dev/null
+++ b/examples/2-vms-loadbalancer-lbrules/README.md
@@ -0,0 +1,22 @@
+# Create 2 Virtual Machines under a Load balancer and configures Load Balancing rules for the VMs
+
+This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-2-vms-loadbalancer-lbrules) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
+
+This template allows you to create 2 Virtual Machines under a Load balancer and configure a load balancing rule on Port 80. This template also deploys a Storage Account, Virtual Network, Public IP address, Availability Set, and Network Interfaces.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please ensure that you add this file to your .gitignore file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
diff --git a/examples/2-vms-loadbalancer-lbrules/main.tf b/examples/2-vms-loadbalancer-lbrules/main.tf
new file mode 100644
index 000000000000..495a5565f2df
--- /dev/null
+++ b/examples/2-vms-loadbalancer-lbrules/main.tf
@@ -0,0 +1,145 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.dns_name}stor"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_availability_set" "avset" {
+ name = "${var.dns_name}avset"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ platform_fault_domain_count = 2
+ platform_update_domain_count = 2
+ managed = true
+}
+
+resource "azurerm_public_ip" "lbpip" {
+ name = "${var.rg_prefix}-ip"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "dynamic"
+ domain_name_label = "${var.lb_ip_dns_name}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.virtual_network_name}"
+ location = "${var.location}"
+ address_space = ["${var.address_space}"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "${var.rg_prefix}subnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "${var.subnet_prefix}"
+}
+
+resource "azurerm_lb" "lb" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ name = "${var.rg_prefix}lb"
+ location = "${var.location}"
+
+ frontend_ip_configuration {
+ name = "LoadBalancerFrontEnd"
+ public_ip_address_id = "${azurerm_public_ip.lbpip.id}"
+ }
+}
+
+resource "azurerm_lb_backend_address_pool" "backend_pool" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ name = "BackendPool1"
+}
+
+resource "azurerm_lb_nat_rule" "tcp" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ name = "RDP-VM-${count.index}"
+ protocol = "tcp"
+ frontend_port = "5000${count.index + 1}"
+ backend_port = 3389
+ frontend_ip_configuration_name = "LoadBalancerFrontEnd"
+ count = 2
+}
+
+resource "azurerm_lb_rule" "lb_rule" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ name = "LBRule"
+ protocol = "tcp"
+ frontend_port = 80
+ backend_port = 80
+ frontend_ip_configuration_name = "LoadBalancerFrontEnd"
+ enable_floating_ip = false
+ backend_address_pool_id = "${azurerm_lb_backend_address_pool.backend_pool.id}"
+ idle_timeout_in_minutes = 5
+ probe_id = "${azurerm_lb_probe.lb_probe.id}"
+ depends_on = ["azurerm_lb_probe.lb_probe"]
+}
+
+resource "azurerm_lb_probe" "lb_probe" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ name = "tcpProbe"
+ protocol = "tcp"
+ port = 80
+ interval_in_seconds = 5
+ number_of_probes = 2
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "nic${count.index}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ count = 2
+
+ ip_configuration {
+ name = "ipconfig${count.index}"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.backend_pool.id}"]
+ load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_rule.tcp.*.id, count.index)}"]
+ }
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "vm${count.index}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ availability_set_id = "${azurerm_availability_set.avset.id}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"]
+ count = 2
+
+ storage_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.image_sku}"
+ version = "${var.image_version}"
+ }
+
+ storage_os_disk {
+ name = "osdisk${count.index}"
+ create_option = "FromImage"
+ }
+
+ os_profile {
+ computer_name = "${var.hostname}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+}
diff --git a/examples/2-vms-loadbalancer-lbrules/outputs.tf b/examples/2-vms-loadbalancer-lbrules/outputs.tf
new file mode 100644
index 000000000000..19757fa523aa
--- /dev/null
+++ b/examples/2-vms-loadbalancer-lbrules/outputs.tf
@@ -0,0 +1,11 @@
+output "hostname" {
+ value = "${var.hostname}"
+}
+
+output "vm_fqdn" {
+ value = "${azurerm_public_ip.lbpip.fqdn}"
+}
+
+output "ssh_command" {
+ value = "ssh ${var.admin_username}@${azurerm_public_ip.lbpip.fqdn}"
+}
diff --git a/examples/2-vms-loadbalancer-lbrules/variables.tf b/examples/2-vms-loadbalancer-lbrules/variables.tf
new file mode 100644
index 000000000000..0e652606aeb2
--- /dev/null
+++ b/examples/2-vms-loadbalancer-lbrules/variables.tf
@@ -0,0 +1,79 @@
+variable "resource_group" {
+ description = "The name of the resource group in which to create the virtual network."
+}
+
+variable "rg_prefix" {
+ description = "The shortened abbreviation to represent your resource group that will go on the front of some resources."
+ default = "rg"
+}
+
+variable "hostname" {
+ description = "VM name referenced also in storage-related names."
+}
+
+variable "dns_name" {
+ description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
+}
+
+variable "lb_ip_dns_name" {
+ description = "DNS for Load Balancer IP"
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
+
+variable "virtual_network_name" {
+ description = "The name for the virtual network."
+ default = "vnet"
+}
+
+variable "address_space" {
+ description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
+ default = "10.0.0.0/16"
+}
+
+variable "subnet_prefix" {
+ description = "The address prefix to use for the subnet."
+ default = "10.0.10.0/24"
+}
+
+variable "storage_account_type" {
+ description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
+ default = "Standard_LRS"
+}
+
+variable "vm_size" {
+ description = "Specifies the size of the virtual machine."
+ default = "Standard_D1"
+}
+
+variable "image_publisher" {
+ description = "name of the publisher of the image (az vm image list)"
+ default = "MicrosoftWindowsServer"
+}
+
+variable "image_offer" {
+ description = "the name of the offer (az vm image list)"
+ default = "WindowsServer"
+}
+
+variable "image_sku" {
+ description = "image sku to apply (az vm image list)"
+ default = "2012-R2-Datacenter"
+}
+
+variable "image_version" {
+ description = "version of the image to apply (az vm image list)"
+ default = "latest"
+}
+
+variable "admin_username" {
+ description = "administrator user name"
+ default = "vmadmin"
+}
+
+variable "admin_password" {
+ description = "administrator password (recommended to disable password auth)"
+}
diff --git a/examples/cdn-with-storage-account/README.md b/examples/cdn-with-storage-account/README.md
new file mode 100644
index 000000000000..047ca5151395
--- /dev/null
+++ b/examples/cdn-with-storage-account/README.md
@@ -0,0 +1,30 @@
+# Create a CDN Profile, a CDN Endpoint with a Storage Account as origin
+
+This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-cdn-with-storage-account) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
+
+This template creates a [CDN Profile](https://docs.microsoft.com/en-us/azure/cdn/cdn-overview) and a CDN Endpoint with the origin as a Storage Account. Note that the user needs to create a public container in the Storage Account in order for CDN Endpoint to serve content from the Storage Account.
+
+# Important
+
+The endpoint will not immediately be available for use, as it takes time for the registration to propagate through the CDN. For Azure CDN from Akamai profiles, propagation will usually complete within one minute. For Azure CDN from Verizon profiles, propagation will usually complete within 90 minutes, but in some cases can take longer.
+
+Users who try to use the CDN domain name before the endpoint configuration has propagated to the POPs will receive HTTP 404 response codes. If it has been several hours since you created your endpoint and you're still receiving 404 responses, please see [Troubleshooting CDN endpoints returning 404 statuses](https://docs.microsoft.com/en-us/azure/cdn/cdn-troubleshoot-endpoint).
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![graph](/examples/azure-cdn-with-storage-account/graph.png)
diff --git a/examples/cdn-with-storage-account/main.tf b/examples/cdn-with-storage-account/main.tf
new file mode 100644
index 000000000000..0f50bb5eb996
--- /dev/null
+++ b/examples/cdn-with-storage-account/main.tf
@@ -0,0 +1,39 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.resource_group}stor"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_cdn_profile" "cdn" {
+ name = "${var.resource_group}CdnProfile1"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ sku = "Standard_Akamai"
+}
+
+resource "azurerm_cdn_endpoint" "cdnendpt" {
+ name = "${var.resource_group}CdnEndpoint1"
+ profile_name = "${azurerm_cdn_profile.cdn.name}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ origin {
+ name = "${var.resource_group}Origin1"
+ host_name = "${var.host_name}"
+ http_port = 80
+ https_port = 443
+ }
+}
\ No newline at end of file
diff --git a/examples/cdn-with-storage-account/outputs.tf b/examples/cdn-with-storage-account/outputs.tf
new file mode 100644
index 000000000000..8f7c1e5c8ad1
--- /dev/null
+++ b/examples/cdn-with-storage-account/outputs.tf
@@ -0,0 +1,3 @@
+output "CDN Endpoint ID" {
+ value = "${azurerm_cdn_endpoint.cdnendpt.name}.azureedge.net"
+}
diff --git a/examples/cdn-with-storage-account/variables.tf b/examples/cdn-with-storage-account/variables.tf
new file mode 100644
index 000000000000..d9bf51015d93
--- /dev/null
+++ b/examples/cdn-with-storage-account/variables.tf
@@ -0,0 +1,18 @@
+variable "resource_group" {
+ description = "The name of the resource group in which to create the virtual network."
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
+
+variable "storage_account_type" {
+ description = "Specifies the type of the storage account"
+ default = "Standard_LRS"
+}
+
+variable "host_name" {
+ description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address."
+ default = "www.hostnameoforiginserver.com"
+}
\ No newline at end of file
diff --git a/examples/encrypt-running-linux-vm/README.md b/examples/encrypt-running-linux-vm/README.md
new file mode 100644
index 000000000000..85ee3e0f752f
--- /dev/null
+++ b/examples/encrypt-running-linux-vm/README.md
@@ -0,0 +1,44 @@
+# Enable encryption on a running Linux VM.
+
+This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-encrypt-running-linux-vm) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
+
+This template enables encryption on a running linux vm using AAD client secret. This template assumes that the VM is located in the same region as the resource group. If not, please edit the template to pass appropriate location for the VM sub-resources.
+
+## Prerequisites:
+Azure Disk Encryption securely stores the encryption secrets in a specified Azure Key Vault.
+
+Create the Key Vault and assign appropriate access policies. You may use this script to ensure that your vault is properly configured: [AzureDiskEncryptionPreRequisiteSetup.ps1](https://github.com/Azure/azure-powershell/blob/10fc37e9141af3fde6f6f79b9d46339b73cf847d/src/ResourceManager/Compute/Commands.Compute/Extension/AzureDiskEncryption/Scripts/AzureDiskEncryptionPreRequisiteSetup.ps1)
+
+Use the below PS cmdlet for getting the `key_vault_secret_url` and `key_vault_resource_id`.
+
+```
+ Get-AzureRmKeyVault -VaultName $KeyVaultName -ResourceGroupName $rgname
+```
+
+References:
+
+- [White paper](https://azure.microsoft.com/en-us/documentation/articles/azure-security-disk-encryption/)
+- [Explore Azure Disk Encryption with Azure Powershell](https://blogs.msdn.microsoft.com/azuresecurity/2015/11/16/explore-azure-disk-encryption-with-azure-powershell/)
+- [Explore Azure Disk Encryption with Azure PowerShell – Part 2](http://blogs.msdn.com/b/azuresecurity/archive/2015/11/21/explore-azure-disk-encryption-with-azure-powershell-part-2.aspx)
+
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
+
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please ensure that you add this file to your .gitignore file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![graph](/examples/azure-encrypt-running-linux-vm/graph.png)
diff --git a/examples/encrypt-running-linux-vm/main.tf b/examples/encrypt-running-linux-vm/main.tf
new file mode 100644
index 000000000000..fcd9736aa493
--- /dev/null
+++ b/examples/encrypt-running-linux-vm/main.tf
@@ -0,0 +1,223 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.hostname}vnet"
+ location = "${var.location}"
+ address_space = ["${var.address_space}"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "${var.hostname}subnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "${var.subnet_prefix}"
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "nic"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ ip_configuration {
+ name = "ipconfig"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ }
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.hostname}stor"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "${var.hostname}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+
+ storage_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.image_sku}"
+ version = "${var.image_version}"
+ }
+
+ storage_os_disk {
+ name = "${var.hostname}osdisk"
+ create_option = "FromImage"
+ disk_size_gb = "30"
+ }
+
+ os_profile {
+ computer_name = "${var.hostname}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+}
+
+resource "azurerm_template_deployment" "linux_vm" {
+ name = "encrypt"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ deployment_mode = "Incremental"
+ depends_on = ["azurerm_virtual_machine.vm"]
+
+ template_body = <**Master subnet:** 10.0.0.0/24
**Node subnet:** 10.0.1.0/24 |
+|Load Balancer |2 probes and two rules for TCP 80 and TCP 443 |
+|Public IP Addresses|OpenShift Master public IP
OpenShift Router public IP attached to Load Balancer |
+|Storage Accounts |2 Storage Accounts |
+|Virtual Machines |Single master
User-defined number of nodes
All VMs include a single attached data disk for Docker thin pool logical volume|
+
+If you have a Red Hat subscription and would like to deploy an OpenShift Container Platform (formerly OpenShift Enterprise) cluster, please visit: https://github.com/Microsoft/openshift-container-platform
+
+### Generate SSH Keys
+
+You'll need to generate an SSH key pair in order to provision this template. Ensure that you do not include a passphrase with the private key.
+If you are using a Windows computer, you can download `puttygen.exe`. You will need to export to OpenSSH (from Conversions menu) to get a valid Private Key for use in the Template.
+From a Linux or Mac, you can just use the `ssh-keygen` command. Once you are finished deploying the cluster, you can always generate a new key pair that uses a passphrase and replaces the original one used during initial deployment.
+
+### Create Key Vault to store SSH Private Key
+
+You will need to create a Key Vault to store your SSH Private Key that will then be used as part of the deployment.
+
+1. **Create Key Vault using Powershell**
+ a. Create new resource group: New-AzureRMResourceGroup -Name 'ResourceGroupName' -Location 'West US'
+ b. Create key vault: New-AzureRmKeyVault -VaultName 'KeyVaultName' -ResourceGroup 'ResourceGroupName' -Location 'West US'
+ c. Create variable with sshPrivateKey: $securesecret = ConvertTo-SecureString -String '[copy ssh Private Key here - including line feeds]' -AsPlainText -Force
+ d. Create Secret: Set-AzureKeyVaultSecret -Name 'SecretName' -SecretValue $securesecret -VaultName 'KeyVaultName'
+ e. Enable the Key Vault for Template Deployments: Set-AzureRmKeyVaultAccessPolicy -VaultName 'KeyVaultName' -ResourceGroupName 'ResourceGroupName' -EnabledForTemplateDeployment
+
+2. **Create Key Vault using Azure CLI 1.0**
+ a. Create new Resource Group: azure group create \<resource-group-name\> \<location\>
+ Ex: `azure group create ResourceGroupName 'East US'`
+ b. Create Key Vault: azure keyvault create -u \<key-vault-name\> -g \<resource-group-name\> -l \<location\>
+ Ex: `azure keyvault create -u KeyVaultName -g ResourceGroupName -l 'East US'`
+ c. Create Secret: azure keyvault secret set -u \<key-vault-name\> -s \<secret-name\> --file \<private-key-file-name\>
+ Ex: `azure keyvault secret set -u KeyVaultName -s SecretName --file ~/.ssh/id_rsa`
+ d. Enable the Key Vault for Template Deployment: azure keyvault set-policy -u \<key-vault-name\> --enabled-for-template-deployment true
+ Ex: `azure keyvault set-policy -u KeyVaultName --enabled-for-template-deployment true`
+
+3. **Create Key Vault using Azure CLI 2.0**
+ a. Create new Resource Group: az group create -n \<resource-group-name\> -l \<location\>
+ Ex: `az group create -n ResourceGroupName -l 'East US'`
+ b. Create Key Vault: az keyvault create -n \<key-vault-name\> -g \<resource-group-name\> -l \<location\> --enabled-for-template-deployment true
+ Ex: `az keyvault create -n KeyVaultName -g ResourceGroupName -l 'East US' --enabled-for-template-deployment true`
+ c. Create Secret: az keyvault secret set --vault-name \<key-vault-name\> -n \<secret-name\> --file \<private-key-file-name\>
+ Ex: `az keyvault secret set --vault-name KeyVaultName -n SecretName --file ~/.ssh/id_rsa`
+4. **Clone the OpenShift repository [here](https://github.com/Microsoft/openshift-origin)**
+ a. Note the local script path, this will be needed for remote-execs on the remote machines.
+
+## Deploy Template
+
+Once you have collected all of the prerequisites for the template, you can deploy the template via terraform.
+
+Monitor deployment via Terraform and get the console URL from outputs of successful deployment which will look something like (if using sample parameters file and "West US 2" location):
+
+`https://me-master1.westus2.cloudapp.azure.com:8443/console`
+
+The cluster will use self-signed certificates. Accept the warning and proceed to the login page.
+
+### NOTE
+
+Ensure combination of openshiftMasterPublicIpDnsLabelPrefix, and nodeLbPublicIpDnsLabelPrefix parameters, combined with the deployment location give you globally unique URL for the cluster or deployment will fail at the step of allocating public IPs with fully-qualified-domain-names as above.
+
+### NOTE
+
+This template deploys a bastion host, merely for the connection provisioner and allowing remote-exec to run commands on machines without public IPs; notice the specific dependencies on the order in which VMs are created for this to work properly.
+
+### NOTE
+
+The OpenShift Ansible playbook does take a while to run when using VMs backed by Standard Storage. VMs backed by Premium Storage are faster. If you want Premium Storage, select a DS or GS series VM.
+
+Be sure to follow the OpenShift instructions to create the necessary DNS entry for the OpenShift Router for access to applications.
+
+## Post-Deployment Operations
+
+This template creates an OpenShift user but does not grant it the cluster administrator role. To do that, please perform the following.
+
+1. SSH in to master node
+2. Execute the following command:
+
+ ```sh
+ sudo oadm policy add-cluster-role-to-user cluster-admin <user>
+ ```
+### Additional OpenShift Configuration Options
+
+You can configure additional settings per the official [OpenShift Origin Documentation](https://docs.openshift.org/latest/welcome/index.html).
+
+Few options you have
+
+1. Deployment Output
+
+ a. openshiftConsoleUrl the openshift console url
+ b. openshiftMasterSsh ssh command for master node
+ c. openshiftNodeLoadBalancerFQDN node load balancer
+
+ get the deployment output data
+
+ a. portal.azure.com -> choose 'Resource groups' select your group select 'Deployments' and there the deployment 'Microsoft.Template'. As output from the deployment it contains information about the openshift console url, ssh command and load balancer url.
+ b. With the Azure CLI : azure group deployment list <resource group name>
+
+2. add additional users. you can find much detail about this in the openshift.org documentation under 'Cluster Administration' and 'Managing Users'. This installation uses htpasswd as the identity provider. To add more user ssh in to master node and execute following command:
+
+ ```sh
+ sudo htpasswd /etc/origin/master/htpasswd user1
+ ```
+ Now this user can login with the 'oc' CLI tool or the openshift console url.
diff --git a/examples/openshift-origin/main.tf b/examples/openshift-origin/main.tf
new file mode 100644
index 000000000000..7237b84642d1
--- /dev/null
+++ b/examples/openshift-origin/main.tf
@@ -0,0 +1,826 @@
+# Configure the Azure Resource Manager provider with service principal
+# credentials supplied through input variables (see variables.tf).
+provider "azurerm" {
+ subscription_id = "${var.subscription_id}"
+ client_id = "${var.aad_client_id}"
+ client_secret = "${var.aad_client_secret}"
+ tenant_id = "${var.tenant_id}"
+}
+
+# Resource group that contains every resource created by this example.
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group_name}"
+ location = "${var.resource_group_location}"
+}
+
+# ******* NETWORK SECURITY GROUPS ***********
+
+# NSG for master VMs: inbound SSH (22), HTTPS (443) and the OpenShift
+# web console (8443) are allowed from any source.
+resource "azurerm_network_security_group" "master_nsg" {
+ name = "${var.openshift_cluster_prefix}-master-nsg"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ security_rule {
+ name = "allow_SSH_in_all"
+ description = "Allow SSH in from all locations"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "22"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "allow_HTTPS_all"
+ description = "Allow HTTPS connections from all locations"
+ priority = 200
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "443"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "allow_OpenShift_console_in_all"
+ description = "Allow OpenShift Console connections from all locations"
+ priority = 300
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "8443"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+
+# NSG for infra (router) VMs: inbound SSH (22), HTTPS (443) and HTTP (80),
+# matching the two load-balancer rules on the infra LB below.
+resource "azurerm_network_security_group" "infra_nsg" {
+ name = "${var.openshift_cluster_prefix}-infra-nsg"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ security_rule {
+ name = "allow_SSH_in_all"
+ description = "Allow SSH in from all locations"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "22"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "allow_HTTPS_all"
+ description = "Allow HTTPS connections from all locations"
+ priority = 200
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "443"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "allow_HTTP_in_all"
+ description = "Allow HTTP connections from all locations"
+ priority = 300
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "80"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+
+# NSG for node VMs: identical rule set to the infra NSG (SSH/HTTPS/HTTP).
+resource "azurerm_network_security_group" "node_nsg" {
+ name = "${var.openshift_cluster_prefix}-node-nsg"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ security_rule {
+ name = "allow_SSH_in_all"
+ description = "Allow SSH in from all locations"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "22"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "allow_HTTPS_all"
+ description = "Allow HTTPS connections from all locations"
+ priority = 200
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "443"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "allow_HTTP_in_all"
+ description = "Allow HTTP connections from all locations"
+ priority = 300
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "80"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+
+# ******* STORAGE ACCOUNTS ***********
+
+# One classic (unmanaged) storage account per VM role; the account tier is
+# looked up from the role's VM size via var.storage_account_type_map, since
+# Premium-storage VM sizes require Premium_LRS accounts for their VHDs.
+resource "azurerm_storage_account" "bastion_storage_account" {
+ name = "${var.openshift_cluster_prefix}bsa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type_map["${var.bastion_vm_size}"]}"
+}
+
+resource "azurerm_storage_account" "master_storage_account" {
+ name = "${var.openshift_cluster_prefix}msa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type_map["${var.master_vm_size}"]}"
+}
+
+resource "azurerm_storage_account" "infra_storage_account" {
+ name = "${var.openshift_cluster_prefix}infrasa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type_map["${var.infra_vm_size}"]}"
+}
+
+resource "azurerm_storage_account" "nodeos_storage_account" {
+ name = "${var.openshift_cluster_prefix}nodeossa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type_map["${var.node_vm_size}"]}"
+}
+
+resource "azurerm_storage_account" "nodedata_storage_account" {
+ name = "${var.openshift_cluster_prefix}nodedatasa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type_map["${var.node_vm_size}"]}"
+}
+
+# Registry and persistent-volume accounts always use Standard_LRS; they hold
+# blob data, not VM disks.
+resource "azurerm_storage_account" "registry_storage_account" {
+ name = "${var.openshift_cluster_prefix}regsa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "Standard_LRS"
+}
+
+resource "azurerm_storage_account" "persistent_volume_storage_account" {
+ name = "${var.openshift_cluster_prefix}pvsa"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "Standard_LRS"
+}
+
+# ******* AVAILABILITY SETS ***********
+
+# One availability set per tier so VMs of a tier are spread across fault /
+# update domains.
+resource "azurerm_availability_set" "master" {
+ name = "masteravailabilityset"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+}
+
+resource "azurerm_availability_set" "infra" {
+ name = "infraavailabilityset"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+}
+
+resource "azurerm_availability_set" "node" {
+ name = "nodeavailabilityset"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+}
+
+# ******* IP ADDRESSES ***********
+
+# Static public IPs. The domain_name_label values must be globally unique in
+# the chosen region, or the deployment fails (see the README NOTE).
+resource "azurerm_public_ip" "bastion_pip" {
+ name = "bastionpip"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ public_ip_address_allocation = "Static"
+ domain_name_label = "${var.openshift_cluster_prefix}-bastion"
+}
+
+# Public IP for the master load balancer / OpenShift console FQDN.
+resource "azurerm_public_ip" "openshift_master_pip" {
+ name = "masterpip"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ public_ip_address_allocation = "Static"
+ domain_name_label = "${var.openshift_cluster_prefix}"
+}
+
+# Public IP attached to the infra (router) load balancer.
+resource "azurerm_public_ip" "infra_lb_pip" {
+ name = "infraip"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ public_ip_address_allocation = "Static"
+ domain_name_label = "${var.openshift_cluster_prefix}infrapip"
+}
+
+# ******* VNETS / SUBNETS ***********
+
+# Virtual network hosting the master and node subnets.
+# Fix: the original declared `depends_on = ["azurerm_virtual_network.vnet"]`,
+# i.e. a dependency on itself, which is meaningless at best and a dependency
+# cycle at worst — removed.
+resource "azurerm_virtual_network" "vnet" {
+ name = "openshiftvnet"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_space = ["10.0.0.0/8"]
+}
+
+# Subnet for master (and infra) VMs. The virtual_network_name reference
+# already creates an implicit dependency on the vnet, so the explicit
+# depends_on below is redundant but harmless.
+# NOTE(review): the README table lists 10.0.0.0/24 / 10.0.1.0/24 for the
+# master/node subnets, but the code uses 10.1.0.0/16 / 10.2.0.0/16 —
+# docs and code disagree; confirm which is intended.
+resource "azurerm_subnet" "master_subnet" {
+ name = "mastersubnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "10.1.0.0/16"
+ depends_on = ["azurerm_virtual_network.vnet"]
+}
+
+# Subnet for node VMs.
+resource "azurerm_subnet" "node_subnet" {
+ name = "nodesubnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "10.2.0.0/16"
+}
+
+# ******* MASTER LOAD BALANCER ***********
+
+# Public load balancer fronting the OpenShift console (8443) on the masters.
+resource "azurerm_lb" "master_lb" {
+ name = "masterloadbalancer"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ depends_on = ["azurerm_public_ip.openshift_master_pip"]
+
+ frontend_ip_configuration {
+ name = "LoadBalancerFrontEnd"
+ public_ip_address_id = "${azurerm_public_ip.openshift_master_pip.id}"
+ }
+}
+
+# Backend pool the master NICs join (see azurerm_network_interface.master_nic).
+resource "azurerm_lb_backend_address_pool" "master_lb" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ name = "loadBalancerBackEnd"
+ loadbalancer_id = "${azurerm_lb.master_lb.id}"
+ depends_on = ["azurerm_lb.master_lb"]
+}
+
+# Health probe for the console port.
+resource "azurerm_lb_probe" "master_lb" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.master_lb.id}"
+ name = "8443Probe"
+ port = 8443
+ interval_in_seconds = 5
+ number_of_probes = 2
+ protocol = "Tcp"
+ depends_on = ["azurerm_lb.master_lb"]
+}
+
+# 8443 -> 8443 rule with SourceIP affinity so a client sticks to one master.
+resource "azurerm_lb_rule" "master_lb" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.master_lb.id}"
+ name = "OpenShiftAdminConsole"
+ protocol = "Tcp"
+ frontend_port = 8443
+ backend_port = 8443
+ frontend_ip_configuration_name = "LoadBalancerFrontEnd"
+ backend_address_pool_id = "${azurerm_lb_backend_address_pool.master_lb.id}"
+ load_distribution = "SourceIP"
+ idle_timeout_in_minutes = 30
+ probe_id = "${azurerm_lb_probe.master_lb.id}"
+ enable_floating_ip = false
+ depends_on = ["azurerm_lb_probe.master_lb", "azurerm_lb.master_lb", "azurerm_lb_backend_address_pool.master_lb"]
+}
+
+# One SSH NAT rule per master: frontend port 2200+N forwards to port 22 on
+# master N (used by the master VM provisioner connection below).
+resource "azurerm_lb_nat_rule" "master_lb" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.master_lb.id}"
+ name = "${azurerm_lb.master_lb.name}-SSH-${count.index}"
+ protocol = "Tcp"
+ frontend_port = "${count.index + 2200}"
+ backend_port = 22
+ frontend_ip_configuration_name = "LoadBalancerFrontEnd"
+ count = "${var.master_instance_count}"
+ depends_on = ["azurerm_lb.master_lb"]
+}
+
+# ******* INFRA LOAD BALANCER ***********
+
+# Public load balancer fronting the OpenShift router (HTTP/HTTPS) on the
+# infra nodes.
+resource "azurerm_lb" "infra_lb" {
+ name = "infraloadbalancer"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ depends_on = ["azurerm_public_ip.infra_lb_pip"]
+
+ frontend_ip_configuration {
+ name = "LoadBalancerFrontEnd"
+ public_ip_address_id = "${azurerm_public_ip.infra_lb_pip.id}"
+ }
+}
+
+resource "azurerm_lb_backend_address_pool" "infra_lb" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ name = "loadBalancerBackEnd"
+ loadbalancer_id = "${azurerm_lb.infra_lb.id}"
+ depends_on = ["azurerm_lb.infra_lb"]
+}
+
+resource "azurerm_lb_probe" "infra_lb_http_probe" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.infra_lb.id}"
+ name = "httpProbe"
+ port = 80
+ interval_in_seconds = 5
+ number_of_probes = 2
+ protocol = "Tcp"
+ depends_on = ["azurerm_lb.infra_lb"]
+}
+
+# Unlike the HTTP probe above, this one has no explicit depends_on; the
+# loadbalancer_id reference provides the same dependency implicitly.
+resource "azurerm_lb_probe" "infra_lb_https_probe" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.infra_lb.id}"
+ name = "httpsProbe"
+ port = 443
+ interval_in_seconds = 5
+ number_of_probes = 2
+ protocol = "Tcp"
+}
+
+resource "azurerm_lb_rule" "infra_lb_http" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.infra_lb.id}"
+ name = "OpenShiftRouterHTTP"
+ protocol = "Tcp"
+ frontend_port = 80
+ backend_port = 80
+ frontend_ip_configuration_name = "LoadBalancerFrontEnd"
+ backend_address_pool_id = "${azurerm_lb_backend_address_pool.infra_lb.id}"
+ probe_id = "${azurerm_lb_probe.infra_lb_http_probe.id}"
+ depends_on = ["azurerm_lb_probe.infra_lb_http_probe", "azurerm_lb.infra_lb", "azurerm_lb_backend_address_pool.infra_lb"]
+}
+
+resource "azurerm_lb_rule" "infra_lb_https" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.infra_lb.id}"
+ name = "OpenShiftRouterHTTPS"
+ protocol = "Tcp"
+ frontend_port = 443
+ backend_port = 443
+ frontend_ip_configuration_name = "LoadBalancerFrontEnd"
+ backend_address_pool_id = "${azurerm_lb_backend_address_pool.infra_lb.id}"
+ probe_id = "${azurerm_lb_probe.infra_lb_https_probe.id}"
+ depends_on = ["azurerm_lb_probe.infra_lb_https_probe", "azurerm_lb_backend_address_pool.infra_lb"]
+}
+
+# ******* NETWORK INTERFACES ***********
+
+# NIC for the bastion host, placed in the master subnet with the bastion
+# public IP attached.
+# NOTE(review): count.index is referenced although this resource sets no
+# count — it resolves against the single implicit instance; confirm intended.
+resource "azurerm_network_interface" "bastion_nic" {
+ name = "bastionnic${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_security_group_id = "${azurerm_network_security_group.master_nsg.id}"
+
+ ip_configuration {
+ name = "bastionip${count.index}"
+ subnet_id = "${azurerm_subnet.master_subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${azurerm_public_ip.bastion_pip.id}"
+ }
+}
+
+# One NIC per master VM: joined to the master LB backend pool and to that
+# instance's SSH NAT rule (frontend port 2200+N).
+resource "azurerm_network_interface" "master_nic" {
+ name = "masternic${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_security_group_id = "${azurerm_network_security_group.master_nsg.id}"
+ count = "${var.master_instance_count}"
+
+ ip_configuration {
+ name = "masterip${count.index}"
+ subnet_id = "${azurerm_subnet.master_subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.master_lb.id}"]
+ load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_rule.master_lb.*.id, count.index)}"]
+ }
+}
+
+# One NIC per infra VM, joined to the infra (router) LB backend pool.
+# NOTE(review): infra NICs are placed in the master subnet rather than a
+# dedicated infra subnet — confirm this is intended.
+resource "azurerm_network_interface" "infra_nic" {
+ name = "infra_nic${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_security_group_id = "${azurerm_network_security_group.infra_nsg.id}"
+ count = "${var.infra_instance_count}"
+
+ ip_configuration {
+ name = "infraip${count.index}"
+ subnet_id = "${azurerm_subnet.master_subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.infra_lb.id}"]
+ }
+}
+
+# One NIC per node VM. Nodes get no public IP; provisioners reach them
+# through the bastion host.
+resource "azurerm_network_interface" "node_nic" {
+ name = "node_nic${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_security_group_id = "${azurerm_network_security_group.node_nsg.id}"
+ count = "${var.node_instance_count}"
+
+ ip_configuration {
+ name = "nodeip${count.index}"
+ subnet_id = "${azurerm_subnet.node_subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ }
+}
+
+# ******* Bastion Host *******
+
+# Bastion host: jump box used by the infra/node VM provisioners
+# (bastion_host in their connection blocks).
+resource "azurerm_virtual_machine" "bastion" {
+ name = "${var.openshift_cluster_prefix}-bastion-1"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_interface_ids = ["${azurerm_network_interface.bastion_nic.id}"]
+ vm_size = "${var.bastion_vm_size}"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+
+ tags {
+ displayName = "${var.openshift_cluster_prefix}-bastion VM Creation"
+ }
+
+ os_profile {
+ # NOTE(review): count.index is used but this resource has no count;
+ # it resolves against the single implicit instance.
+ computer_name = "${var.openshift_cluster_prefix}-bastion-${count.index}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.openshift_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = true
+
+ ssh_keys {
+ path = "/home/${var.admin_username}/.ssh/authorized_keys"
+ key_data = "${var.ssh_public_key}"
+ }
+ }
+
+ # Image reference assembled from the os_image_map lookup table
+ # (keys like "<os_image>_publisher", "<os_image>_offer", ...).
+ storage_image_reference {
+ publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
+ offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
+ sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
+ version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
+ }
+
+ storage_os_disk {
+ # Fix: the disk was named "-master-osdisk${count.index}" — a copy-paste
+ # from the master resource. Renamed to match this VM and its VHD below.
+ name = "${var.openshift_cluster_prefix}-bastion-osdisk"
+ vhd_uri = "${azurerm_storage_account.bastion_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-bastion-osdisk.vhd"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ disk_size_gb = 60
+ }
+}
+
+# ******* Master VMs *******
+
+# Master VMs. Created last (depends_on infra and node VMs) because the
+# deployOpenShift.sh provisioner below configures the whole cluster and
+# needs the other machines to exist.
+resource "azurerm_virtual_machine" "master" {
+ name = "${var.openshift_cluster_prefix}-master-${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ availability_set_id = "${azurerm_availability_set.master.id}"
+ network_interface_ids = ["${element(azurerm_network_interface.master_nic.*.id, count.index)}"]
+ vm_size = "${var.master_vm_size}"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+ count = "${var.master_instance_count}"
+ depends_on = ["azurerm_virtual_machine.infra", "azurerm_virtual_machine.node"]
+
+ tags {
+ displayName = "${var.openshift_cluster_prefix}-master VM Creation"
+ }
+
+ # SSH in through the master LB NAT rules (frontend ports start at 2200).
+ # NOTE(review): the port is fixed at 2200, which maps to master-0's NAT
+ # rule — with master_instance_count > 1 every instance's provisioners
+ # would target the same VM; confirm whether this should be
+ # "${count.index + 2200}".
+ connection {
+ host = "${azurerm_public_ip.openshift_master_pip.fqdn}"
+ user = "${var.admin_username}"
+ port = 2200
+ private_key = "${file(var.connection_private_ssh_key_path)}"
+ }
+
+ # Upload the prep and deployment scripts from the cloned
+ # openshift-origin repo (var.openshift_script_path).
+ provisioner "file" {
+ source = "${var.openshift_script_path}/masterPrep.sh"
+ destination = "masterPrep.sh"
+ }
+
+ provisioner "file" {
+ source = "${var.openshift_script_path}/deployOpenShift.sh"
+ destination = "deployOpenShift.sh"
+ }
+
+ # Prep the master, then run the full OpenShift deployment, passing the
+ # storage accounts, cluster topology, Azure AD credentials and Key Vault
+ # details as positional arguments.
+ provisioner "remote-exec" {
+ inline = [
+ "chmod +x masterPrep.sh",
+ "chmod +x deployOpenShift.sh",
+ "sudo bash masterPrep.sh \"${azurerm_storage_account.persistent_volume_storage_account.name}\" \"${var.admin_username}\" && sudo bash deployOpenShift.sh \"${var.admin_username}\" \"${var.openshift_password}\" \"${var.key_vault_secret}\" \"${var.openshift_cluster_prefix}-master\" \"${azurerm_public_ip.openshift_master_pip.fqdn}\" \"${azurerm_public_ip.openshift_master_pip.ip_address}\" \"${var.openshift_cluster_prefix}-infra\" \"${var.openshift_cluster_prefix}-node\" \"${var.node_instance_count}\" \"${var.infra_instance_count}\" \"${var.master_instance_count}\" \"${var.default_sub_domain_type}\" \"${azurerm_storage_account.registry_storage_account.name}\" \"${azurerm_storage_account.registry_storage_account.primary_access_key}\" \"${var.tenant_id}\" \"${var.subscription_id}\" \"${var.aad_client_id}\" \"${var.aad_client_secret}\" \"${azurerm_resource_group.rg.name}\" \"${azurerm_resource_group.rg.location}\" \"${var.key_vault_name}\""
+ ]
+ }
+
+ os_profile {
+ computer_name = "${var.openshift_cluster_prefix}-master-${count.index}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.openshift_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = true
+
+ ssh_keys {
+ path = "/home/${var.admin_username}/.ssh/authorized_keys"
+ key_data = "${var.ssh_public_key}"
+ }
+ }
+
+ storage_image_reference {
+ publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
+ offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
+ sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
+ version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
+ }
+
+ storage_os_disk {
+ name = "${var.openshift_cluster_prefix}-master-osdisk${count.index}"
+ vhd_uri = "${azurerm_storage_account.master_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-master-osdisk${count.index}.vhd"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ disk_size_gb = 60
+ }
+
+ # Empty data disk used for the Docker thin pool (see README table).
+ storage_data_disk {
+ name = "${var.openshift_cluster_prefix}-master-docker-pool${count.index}"
+ vhd_uri = "${azurerm_storage_account.master_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-master-docker-pool${count.index}.vhd"
+ disk_size_gb = "${var.data_disk_size}"
+ create_option = "Empty"
+ lun = 0
+ }
+}
+
+# ******* Infra VMs *******
+
+# Infra (router) VMs. Provisioned through the bastion host since they have
+# no public IPs of their own.
+resource "azurerm_virtual_machine" "infra" {
+ name = "${var.openshift_cluster_prefix}-infra-${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ availability_set_id = "${azurerm_availability_set.infra.id}"
+ network_interface_ids = ["${element(azurerm_network_interface.infra_nic.*.id, count.index)}"]
+ vm_size = "${var.infra_vm_size}"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+ count = "${var.infra_instance_count}"
+
+ tags {
+ displayName = "${var.openshift_cluster_prefix}-infra VM Creation"
+ }
+
+ # SSH to the private IP, hopping through the bastion host.
+ connection {
+ type = "ssh"
+ bastion_host = "${azurerm_public_ip.bastion_pip.fqdn}"
+ bastion_user = "${var.admin_username}"
+ bastion_private_key = "${file(var.connection_private_ssh_key_path)}"
+ host = "${element(azurerm_network_interface.infra_nic.*.private_ip_address, count.index)}"
+ user = "${var.admin_username}"
+ private_key = "${file(var.connection_private_ssh_key_path)}"
+ }
+
+ # Upload and run the node preparation script.
+ provisioner "file" {
+ source = "${var.openshift_script_path}/nodePrep.sh"
+ destination = "nodePrep.sh"
+ }
+
+ provisioner "remote-exec" {
+ inline = [
+ "chmod +x nodePrep.sh",
+ "sudo bash nodePrep.sh",
+ ]
+ }
+
+ os_profile {
+ computer_name = "${var.openshift_cluster_prefix}-infra-${count.index}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.openshift_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = true
+
+ ssh_keys {
+ path = "/home/${var.admin_username}/.ssh/authorized_keys"
+ key_data = "${var.ssh_public_key}"
+ }
+ }
+
+ storage_image_reference {
+ publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
+ offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
+ sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
+ version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
+ }
+
+ storage_os_disk {
+ name = "${var.openshift_cluster_prefix}-infra-osdisk${count.index}"
+ vhd_uri = "${azurerm_storage_account.infra_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-infra-osdisk${count.index}.vhd"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ }
+
+ # Docker thin-pool data disk.
+ # Fix: appended ${count.index} to the disk name — the original used the
+ # same name for every instance while the VHD URI was per-instance, and
+ # the master/node resources already index their disk names.
+ storage_data_disk {
+ name = "${var.openshift_cluster_prefix}-infra-docker-pool${count.index}"
+ vhd_uri = "${azurerm_storage_account.infra_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-infra-docker-pool${count.index}.vhd"
+ disk_size_gb = "${var.data_disk_size}"
+ create_option = "Empty"
+ lun = 0
+ }
+}
+
+# ******* Node VMs *******
+
+# Node VMs. Provisioned through the bastion host since they have no public
+# IPs of their own.
+resource "azurerm_virtual_machine" "node" {
+ name = "${var.openshift_cluster_prefix}-node-${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ availability_set_id = "${azurerm_availability_set.node.id}"
+ network_interface_ids = ["${element(azurerm_network_interface.node_nic.*.id, count.index)}"]
+ vm_size = "${var.node_vm_size}"
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = true
+ count = "${var.node_instance_count}"
+
+ tags {
+ displayName = "${var.openshift_cluster_prefix}-node VM Creation"
+ }
+
+ # SSH to the private IP, hopping through the bastion host.
+ connection {
+ type = "ssh"
+ bastion_host = "${azurerm_public_ip.bastion_pip.fqdn}"
+ bastion_user = "${var.admin_username}"
+ bastion_private_key = "${file(var.connection_private_ssh_key_path)}"
+ host = "${element(azurerm_network_interface.node_nic.*.private_ip_address, count.index)}"
+ user = "${var.admin_username}"
+ private_key = "${file(var.connection_private_ssh_key_path)}"
+ }
+
+ # Upload and run the node preparation script.
+ provisioner "file" {
+ source = "${var.openshift_script_path}/nodePrep.sh"
+ destination = "nodePrep.sh"
+ }
+
+ provisioner "remote-exec" {
+ inline = [
+ "chmod +x nodePrep.sh",
+ "sudo bash nodePrep.sh",
+ ]
+ }
+
+ os_profile {
+ computer_name = "${var.openshift_cluster_prefix}-node-${count.index}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.openshift_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = true
+
+ ssh_keys {
+ path = "/home/${var.admin_username}/.ssh/authorized_keys"
+ key_data = "${var.ssh_public_key}"
+ }
+ }
+
+ storage_image_reference {
+ publisher = "${lookup(var.os_image_map, join("_publisher", list(var.os_image, "")))}"
+ offer = "${lookup(var.os_image_map, join("_offer", list(var.os_image, "")))}"
+ sku = "${lookup(var.os_image_map, join("_sku", list(var.os_image, "")))}"
+ version = "${lookup(var.os_image_map, join("_version", list(var.os_image, "")))}"
+ }
+
+ storage_os_disk {
+ # Fix: appended ${count.index} to the disk name — the original used the
+ # same name for every instance while the VHD URI was per-instance, and
+ # the master resource already indexes its OS disk name.
+ name = "${var.openshift_cluster_prefix}-node-osdisk${count.index}"
+ vhd_uri = "${azurerm_storage_account.nodeos_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-node-osdisk${count.index}.vhd"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ }
+
+ # Docker thin-pool data disk.
+ storage_data_disk {
+ name = "${var.openshift_cluster_prefix}-node-docker-pool${count.index}"
+ vhd_uri = "${azurerm_storage_account.nodeos_storage_account.primary_blob_endpoint}vhds/${var.openshift_cluster_prefix}-node-docker-pool${count.index}.vhd"
+ disk_size_gb = "${var.data_disk_size}"
+ create_option = "Empty"
+ lun = 0
+ }
+}
+
+# ******* VM EXTENSIONS *******
+
+
+# resource "azurerm_virtual_machine_extension" "deploy_open_shift_master" {
+# name = "masterOpShExt${count.index}"
+# location = "${azurerm_resource_group.rg.location}"
+# resource_group_name = "${azurerm_resource_group.rg.name}"
+# virtual_machine_name = "${element(azurerm_virtual_machine.master.*.name, count.index)}"
+# publisher = "Microsoft.Azure.Extensions"
+# type = "CustomScript"
+# type_handler_version = "2.0"
+# auto_upgrade_minor_version = true
+# depends_on = ["azurerm_virtual_machine.master", "azurerm_virtual_machine_extension.node_prep", "azurerm_storage_container.vhds", "azurerm_virtual_machine_extension.deploy_infra"]
+#
+# settings = <<SETTINGS
+- See Traffic Manager routing methods for details of the different routing methods available.
+- See Create or update a Traffic Manager profile for details of the JSON elements relating to a Traffic Manager profile.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and password to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please insure that you add this file to your `.gitignore` file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![`terraform graph`](/examples/azure-traffic-manager-vm/graph.png)
diff --git a/examples/traffic-manager-vm/main.tf b/examples/traffic-manager-vm/main.tf
new file mode 100644
index 000000000000..ef34a8ad5391
--- /dev/null
+++ b/examples/traffic-manager-vm/main.tf
@@ -0,0 +1,125 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_public_ip" "pip" {
+ name = "ip${count.index}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "dynamic"
+ domain_name_label = "${var.dns_name}${count.index}"
+ count = "${var.num_vms}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.vnet}"
+ location = "${var.location}"
+ address_space = ["${var.address_space}"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "${var.subnet_name}"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "${var.subnet_prefix}"
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "nic${count.index}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ count = "${var.num_vms}"
+
+ ip_configuration {
+ name = "ipconfig${count.index}"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${element(azurerm_public_ip.pip.*.id, count.index)}"
+ }
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "vm${count.index}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ vm_size = "${var.vm_size}"
+ count = "${var.num_vms}"
+ network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"]
+
+ storage_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.image_sku}"
+ version = "${var.image_version}"
+ }
+
+ storage_os_disk {
+ name = "osdisk${count.index}"
+ create_option = "FromImage"
+ }
+
+ os_profile {
+ computer_name = "vm${count.index}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+}
+
+resource "azurerm_virtual_machine_extension" "ext" {
+ depends_on = ["azurerm_virtual_machine.vm"]
+ name = "CustomScript"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ virtual_machine_name = "vm${count.index}"
+ publisher = "Microsoft.Azure.Extensions"
+ type = "CustomScript"
+ type_handler_version = "2.0"
+ count = "${var.num_vms}"
+ auto_upgrade_minor_version = true
+
+ settings = < Prerequisite - The generalized image VHD should exist, as well as a Storage Account for boot diagnostics
+
+This template allows you to create a Virtual Machine from an unmanaged User image vhd. This template also deploys a Virtual Network, Public IP addresses and a Network Interface.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![graph](/examples/azure-vm-from-user-image/graph.png)
diff --git a/examples/vm-from-user-image/main.tf b/examples/vm-from-user-image/main.tf
new file mode 100644
index 000000000000..1295afb2ba16
--- /dev/null
+++ b/examples/vm-from-user-image/main.tf
@@ -0,0 +1,73 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.hostname}vnet"
+ location = "${var.location}"
+ address_space = ["${var.address_space}"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "${var.hostname}subnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "${var.subnet_prefix}"
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "${var.hostname}nic"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ ip_configuration {
+ name = "${var.hostname}ipconfig"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${azurerm_public_ip.pip.id}"
+ }
+}
+
+resource "azurerm_public_ip" "pip" {
+ name = "${var.hostname}-ip"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "Dynamic"
+ domain_name_label = "${var.hostname}"
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "${var.hostname}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+
+ storage_os_disk {
+ name = "${var.hostname}-osdisk1"
+ image_uri = "${var.image_uri}"
+ vhd_uri = "https://${var.storage_account_name}.blob.core.windows.net/vhds/${var.hostname}-osdisk.vhd"
+ os_type = "${var.os_type}"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ }
+
+ os_profile {
+ computer_name = "${var.hostname}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+}
diff --git a/examples/vm-from-user-image/outputs.tf b/examples/vm-from-user-image/outputs.tf
new file mode 100644
index 000000000000..58a17046f1f4
--- /dev/null
+++ b/examples/vm-from-user-image/outputs.tf
@@ -0,0 +1,11 @@
+output "hostname" {
+ value = "${var.hostname}"
+}
+
+output "vm_fqdn" {
+ value = "${azurerm_public_ip.pip.fqdn}"
+}
+
+output "ssh_command" {
+ value = "${concat("ssh ", var.admin_username, "@", azurerm_public_ip.pip.fqdn)}"
+}
diff --git a/examples/vm-from-user-image/variables.tf b/examples/vm-from-user-image/variables.tf
new file mode 100644
index 000000000000..133c02bbbf5c
--- /dev/null
+++ b/examples/vm-from-user-image/variables.tf
@@ -0,0 +1,55 @@
+variable "resource_group" {
+ description = "The name of the resource group in which the image to clone resides."
+ default = "myrg"
+}
+
+variable "image_uri" {
+ description = "Specifies the image_uri in the form publisherName:offer:skus:version. image_uri can also specify the VHD uri of a custom VM image to clone."
+}
+
+variable "os_type" {
+ description = "Specifies the operating system Type, valid values are windows, linux."
+ default = "linux"
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
+
+variable "address_space" {
+ description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
+ default = "10.0.0.0/24"
+}
+
+variable "subnet_prefix" {
+ description = "The address prefix to use for the subnet."
+ default = "10.0.0.0/24"
+}
+
+variable "storage_account_name" {
+ description = "The name of the storage account in which the image from which you are cloning resides."
+}
+
+variable "storage_account_type" {
+ description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
+ default = "Premium_LRS"
+}
+
+variable "vm_size" {
+ description = "Specifies the size of the virtual machine. This must be the same as the vm image from which you are copying."
+ default = "Standard_DS1_v2"
+}
+
+variable "hostname" {
+ description = "VM name referenced also in storage-related names. This is also used as the label for the Domain Name and to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
+}
+
+variable "admin_username" {
+ description = "administrator user name"
+ default = "vmadmin"
+}
+
+variable "admin_password" {
+ description = "The Password for the account specified in the 'admin_username' field. We recommend disabling Password Authentication in a Production environment."
+}
diff --git a/examples/vm-simple-linux-managed-disk/README.md b/examples/vm-simple-linux-managed-disk/README.md
new file mode 100644
index 000000000000..4a6b2ef9b239
--- /dev/null
+++ b/examples/vm-simple-linux-managed-disk/README.md
@@ -0,0 +1,22 @@
+# Very simple deployment of a Linux VM
+
+This template allows you to deploy a simple Linux VM using a few different options for the Ubuntu version, using the latest patched version. This will deploy an A0 size VM in the resource group location and return the FQDN of the VM.
+
+This template takes a minimum amount of parameters and deploys a Linux VM, using the latest patched version.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![graph](/examples/azure-vm-simple-linux-managed-disk/graph.png)
diff --git a/examples/vm-simple-linux-managed-disk/main.tf b/examples/vm-simple-linux-managed-disk/main.tf
new file mode 100644
index 000000000000..5dc9ce1cb086
--- /dev/null
+++ b/examples/vm-simple-linux-managed-disk/main.tf
@@ -0,0 +1,108 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.virtual_network_name}"
+ location = "${var.location}"
+ address_space = ["${var.address_space}"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "${var.rg_prefix}subnet"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "${var.subnet_prefix}"
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "${var.rg_prefix}nic"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ ip_configuration {
+ name = "${var.rg_prefix}ipconfig"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${azurerm_public_ip.pip.id}"
+ }
+}
+
+resource "azurerm_public_ip" "pip" {
+ name = "${var.rg_prefix}-ip"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "Dynamic"
+ domain_name_label = "${var.dns_name}"
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.dns_name}stor"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_managed_disk" "datadisk" {
+ name = "${var.hostname}-datadisk"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ storage_account_type = "Standard_LRS"
+ create_option = "Empty"
+ disk_size_gb = "1023"
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "${var.rg_prefix}vm"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+
+ storage_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.image_sku}"
+ version = "${var.image_version}"
+ }
+
+ storage_os_disk {
+ name = "${var.hostname}-osdisk"
+ managed_disk_type = "Standard_LRS"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ }
+
+ storage_data_disk {
+ name = "${var.hostname}-datadisk"
+ managed_disk_id = "${azurerm_managed_disk.datadisk.id}"
+ managed_disk_type = "Standard_LRS"
+ disk_size_gb = "1023"
+ create_option = "Attach"
+ lun = 0
+ }
+
+ os_profile {
+ computer_name = "${var.hostname}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+
+ boot_diagnostics {
+ enabled = true
+ storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}"
+ }
+}
\ No newline at end of file
diff --git a/examples/vm-simple-linux-managed-disk/outputs.tf b/examples/vm-simple-linux-managed-disk/outputs.tf
new file mode 100644
index 000000000000..32c6294ceeab
--- /dev/null
+++ b/examples/vm-simple-linux-managed-disk/outputs.tf
@@ -0,0 +1,11 @@
+output "hostname" {
+ value = "${var.hostname}"
+}
+
+output "vm_fqdn" {
+ value = "${azurerm_public_ip.pip.fqdn}"
+}
+
+output "ssh_command" {
+ value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}"
+}
\ No newline at end of file
diff --git a/examples/vm-simple-linux-managed-disk/variables.tf b/examples/vm-simple-linux-managed-disk/variables.tf
new file mode 100644
index 000000000000..91024000bc03
--- /dev/null
+++ b/examples/vm-simple-linux-managed-disk/variables.tf
@@ -0,0 +1,75 @@
+variable "resource_group" {
+ description = "The name of the resource group in which to create the virtual network."
+}
+
+variable "rg_prefix" {
+ description = "The shortened abbreviation to represent your resource group that will go on the front of some resources."
+ default = "rg"
+}
+
+variable "hostname" {
+ description = "VM name referenced also in storage-related names."
+}
+
+variable "dns_name" {
+ description = " Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
+
+variable "virtual_network_name" {
+ description = "The name for the virtual network."
+ default = "vnet"
+}
+
+variable "address_space" {
+ description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
+ default = "10.0.0.0/16"
+}
+
+variable "subnet_prefix" {
+ description = "The address prefix to use for the subnet."
+ default = "10.0.10.0/24"
+}
+
+variable "storage_account_type" {
+ description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
+ default = "Standard_LRS"
+}
+
+variable "vm_size" {
+ description = "Specifies the size of the virtual machine."
+ default = "Standard_A0"
+}
+
+variable "image_publisher" {
+ description = "name of the publisher of the image (az vm image list)"
+ default = "Canonical"
+}
+
+variable "image_offer" {
+ description = "the name of the offer (az vm image list)"
+ default = "UbuntuServer"
+}
+
+variable "image_sku" {
+ description = "image sku to apply (az vm image list)"
+ default = "16.04-LTS"
+}
+
+variable "image_version" {
+ description = "version of the image to apply (az vm image list)"
+ default = "latest"
+}
+
+variable "admin_username" {
+ description = "administrator user name"
+ default = "vmadmin"
+}
+
+variable "admin_password" {
+ description = "administrator password (recommended to disable password auth)"
+}
\ No newline at end of file
diff --git a/examples/vm-specialized-vhd-existing-vnet/README.md b/examples/vm-specialized-vhd-existing-vnet/README.md
new file mode 100644
index 000000000000..3afc0f8471c5
--- /dev/null
+++ b/examples/vm-specialized-vhd-existing-vnet/README.md
@@ -0,0 +1,35 @@
+# Create a specialized virtual machine in an existing virtual network [![Build Status](https://travis-ci.org/harijayms/terraform.svg?branch=topic-201-vm-specialized-vhd-existing-vnet)](https://travis-ci.org/harijayms/terraform)
+
+This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/201-vm-specialized-vhd-existing-vnet) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected in this Terraform template.
+
+## Prerequisites
+
+- VHD file from which to create a VM that already exists in a storage account
+- Name of the existing VNET and subnet to which the new virtual machine will connect
+- Name of the Resource Group in which the VNET resides
+
+
+### NOTE
+
+This template will create an additional Standard_GRS storage account for enabling boot diagnostics each time you execute this template. To avoid running into storage account limits, it is best to delete the storage account when the VM is deleted.
+
+This template creates a VM from a specialized VHD and lets you connect it to an existing VNET that can reside in a different Resource Group from which the virtual machine resides.
+
+_Please note: This deployment template does not create or attach an existing Network Security Group to the virtual machine._
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![graph](/examples/azure-vm-specialized-vhd-existing-vnet/graph.png)
diff --git a/examples/vm-specialized-vhd-existing-vnet/main.tf b/examples/vm-specialized-vhd-existing-vnet/main.tf
new file mode 100644
index 000000000000..821ee8c31c63
--- /dev/null
+++ b/examples/vm-specialized-vhd-existing-vnet/main.tf
@@ -0,0 +1,71 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_public_ip" "pip" {
+ name = "PublicIp"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "Dynamic"
+ domain_name_label = "${var.hostname}"
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = "nic"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ ip_configuration {
+ name = "ipconfig"
+ subnet_id = "${var.existing_subnet_id}"
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = "${azurerm_public_ip.pip.id}"
+ }
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.hostname}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${var.location}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_virtual_machine" "vm" {
+ name = "${var.hostname}"
+ location = "${var.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${azurerm_network_interface.nic.id}"]
+
+ storage_os_disk {
+ name = "${var.hostname}osdisk1"
+ image_uri = "${var.os_disk_vhd_uri}"
+ vhd_uri = "https://${var.existing_storage_acct}.blob.core.windows.net/${var.existing_vnet_resource_group}-vhds/${var.hostname}osdisk.vhd"
+ os_type = "${var.os_type}"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ }
+
+ os_profile {
+ computer_name = "${var.hostname}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+
+ boot_diagnostics {
+ enabled = true
+ storage_uri = "${azurerm_storage_account.stor.primary_blob_endpoint}"
+ }
+}
diff --git a/examples/vm-specialized-vhd-existing-vnet/outputs.tf b/examples/vm-specialized-vhd-existing-vnet/outputs.tf
new file mode 100644
index 000000000000..13768e554d6c
--- /dev/null
+++ b/examples/vm-specialized-vhd-existing-vnet/outputs.tf
@@ -0,0 +1,11 @@
+output "hostname" {
+ value = "${var.hostname}"
+}
+
+output "vm_fqdn" {
+ value = "${azurerm_public_ip.pip.fqdn}"
+}
+
+output "ssh_command" {
+ value = "ssh ${var.admin_username}@${azurerm_public_ip.pip.fqdn}"
+}
diff --git a/examples/vm-specialized-vhd-existing-vnet/variables.tf b/examples/vm-specialized-vhd-existing-vnet/variables.tf
new file mode 100644
index 000000000000..4e53919a84d0
--- /dev/null
+++ b/examples/vm-specialized-vhd-existing-vnet/variables.tf
@@ -0,0 +1,90 @@
+variable "resource_group" {
+ description = "Name of the resource group in which to deploy your new Virtual Machine"
+}
+
+variable "existing_vnet_resource_group" {
+ description = "Name of the existing resource group in which the existing vnet resides"
+}
+
+variable "location" {
+ description = "The location/region where the virtual network resides."
+ default = "southcentralus"
+}
+
+variable "hostname" {
+ description = "This variable is used in this template to create the domain name label as well as the virtual machine name. Must be unique."
+}
+
+variable "os_type" {
+ description = "Type of OS on the existing vhd. Allowed values: 'windows' or 'linux'."
+ default = "linux"
+}
+
+variable "os_disk_vhd_uri" {
+ description = "Uri of the existing VHD in ARM standard or premium storage"
+}
+
+variable "existing_storage_acct" {
+ description = "The name of the storage account in which your existing VHD and image reside"
+}
+
+variable "existing_virtual_network_name" {
+ description = "The name for the existing virtual network"
+}
+
+variable "existing_subnet_name" {
+ description = "The name for the existing subnet in the existing virtual network"
+}
+
+variable "existing_subnet_id" {
+ description = "The id for the existing subnet in the existing virtual network"
+}
+
+variable "address_space" {
+ description = "The address space that is used by the virtual network. You can supply more than one address space. Changing this forces a new resource to be created."
+ default = "10.0.0.0/16"
+}
+
+variable "subnet_prefix" {
+ description = "The address prefix to use for the subnet."
+ default = "10.0.10.0/24"
+}
+
+variable "storage_account_type" {
+ description = "Defines the type of storage account to be created. Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS. Changing this is sometimes valid - see the Azure documentation for more information on which types of accounts can be converted into other types."
+ default = "Standard_GRS"
+}
+
+variable "vm_size" {
+ description = "Specifies the size of the virtual machine."
+ default = "Standard_DS1_v2"
+}
+
+variable "image_publisher" {
+ description = "name of the publisher of the image (az vm image list)"
+ default = "Canonical"
+}
+
+variable "image_offer" {
+ description = "the name of the offer (az vm image list)"
+ default = "UbuntuServer"
+}
+
+variable "image_sku" {
+ description = "image sku to apply (az vm image list)"
+ default = "16.04-LTS"
+}
+
+variable "image_version" {
+ description = "version of the image to apply (az vm image list)"
+ default = "latest"
+}
+
+variable "admin_username" {
+ description = "administrator user name"
+ default = "vmadmin"
+}
+
+variable "admin_password" {
+ description = "administrator password (recommended to disable password auth)"
+}
diff --git a/examples/vmss-ubuntu/README.md b/examples/vmss-ubuntu/README.md
new file mode 100644
index 000000000000..f6208df84300
--- /dev/null
+++ b/examples/vmss-ubuntu/README.md
@@ -0,0 +1,22 @@
+# Linux VM Scale Set
+
+This template deploys a desired count Linux VM Scale Set. Once the VMSS is deployed, the user can deploy an application inside each of the VMs (either by directly logging into the VMs or via a [`remote-exec` provisioner](https://www.terraform.io/docs/provisioners/remote-exec.html)).
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is outputted when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
+
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![`terraform graph`](/examples/azure-vmss-ubuntu/graph.png)
diff --git a/examples/vmss-ubuntu/main.tf b/examples/vmss-ubuntu/main.tf
new file mode 100644
index 000000000000..84480abbd7d4
--- /dev/null
+++ b/examples/vmss-ubuntu/main.tf
@@ -0,0 +1,127 @@
+# provider "azurerm" {
+# subscription_id = "${var.subscription_id}"
+# client_id = "${var.client_id}"
+# client_secret = "${var.client_secret}"
+# tenant_id = "${var.tenant_id}"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.resource_group}vnet"
+ location = "${azurerm_resource_group.rg.location}"
+ address_space = ["10.0.0.0/16"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet" {
+ name = "subnet"
+ address_prefix = "10.0.0.0/24"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+}
+
+resource "azurerm_public_ip" "pip" {
+ name = "${var.hostname}-pip"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "Dynamic"
+ domain_name_label = "${var.hostname}"
+}
+
+resource "azurerm_lb" "lb" {
+ name = "LoadBalancer"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ depends_on = ["azurerm_public_ip.pip"]
+
+ frontend_ip_configuration {
+ name = "LBFrontEnd"
+ public_ip_address_id = "${azurerm_public_ip.pip.id}"
+ }
+}
+
+resource "azurerm_lb_backend_address_pool" "backlb" {
+ name = "BackEndAddressPool"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+}
+
+resource "azurerm_lb_nat_pool" "np" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ name = "NATPool"
+ protocol = "Tcp"
+ frontend_port_start = 50000
+ frontend_port_end = 50119
+ backend_port = 22
+ frontend_ip_configuration_name = "LBFrontEnd"
+}
+
+resource "azurerm_storage_account" "stor" {
+ name = "${var.resource_group}stor"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ account_type = "${var.storage_account_type}"
+}
+
+resource "azurerm_storage_container" "vhds" {
+ name = "vhds"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ storage_account_name = "${azurerm_storage_account.stor.name}"
+ container_access_type = "blob"
+}
+
+resource "azurerm_virtual_machine_scale_set" "scaleset" {
+ name = "autoscalewad"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ upgrade_policy_mode = "Manual"
+ overprovision = true
+ depends_on = ["azurerm_lb.lb", "azurerm_virtual_network.vnet"]
+
+ sku {
+ name = "${var.vm_sku}"
+ tier = "Standard"
+ capacity = "${var.instance_count}"
+ }
+
+ os_profile {
+ computer_name_prefix = "${var.vmss_name}"
+ admin_username = "${var.admin_username}"
+ admin_password = "${var.admin_password}"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+
+ network_profile {
+ name = "${var.hostname}-nic"
+ primary = true
+
+ ip_configuration {
+ name = "${var.hostname}ipconfig"
+ subnet_id = "${azurerm_subnet.subnet.id}"
+ load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.backlb.id}"]
+ load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_pool.np.*.id, count.index)}"]
+ }
+ }
+
+ storage_profile_os_disk {
+ name = "${var.hostname}"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ vhd_containers = ["${azurerm_storage_account.stor.primary_blob_endpoint}${azurerm_storage_container.vhds.name}"]
+ }
+
+ storage_profile_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.ubuntu_os_version}"
+ version = "latest"
+ }
+}
diff --git a/examples/vmss-ubuntu/outputs.tf b/examples/vmss-ubuntu/outputs.tf
new file mode 100644
index 000000000000..3eba047a26fb
--- /dev/null
+++ b/examples/vmss-ubuntu/outputs.tf
@@ -0,0 +1,3 @@
+output "hostname" {
+ value = "${var.vmss_name}"
+}
diff --git a/examples/vmss-ubuntu/variables.tf b/examples/vmss-ubuntu/variables.tf
new file mode 100644
index 000000000000..513ce167b41e
--- /dev/null
+++ b/examples/vmss-ubuntu/variables.tf
@@ -0,0 +1,59 @@
+# variable "subscription_id" {}
+# variable "client_id" {}
+# variable "client_secret" {}
+# variable "tenant_id" {}
+
+variable "resource_group" {
+ description = "The name of the resource group in which to create the virtual network."
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
+
+variable "storage_account_type" {
+ description = "Specifies the type of the storage account"
+ default = "Standard_LRS"
+}
+
+variable "hostname" {
+ description = "A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address."
+}
+
+variable "vm_sku" {
+ description = "Size of VMs in the VM Scale Set."
+ default = "Standard_A1"
+}
+
+variable "ubuntu_os_version" {
+ description = "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values are: 15.10, 14.04.4-LTS, 16.04.0-LTS."
+ default = "16.04.0-LTS"
+}
+
+variable "image_publisher" {
+ description = "The name of the publisher of the image (az vm image list)"
+ default = "Canonical"
+}
+
+variable "image_offer" {
+ description = "The name of the offer (az vm image list)"
+ default = "UbuntuServer"
+}
+
+variable "vmss_name" {
+ description = "String used as a base for naming resources. Must be 3-61 characters in length and globally unique across Azure. A hash is prepended to this string for some resources, and resource-specific information is appended."
+}
+
+variable "instance_count" {
+ description = "Number of VM instances (100 or less)."
+ default = "5"
+}
+
+variable "admin_username" {
+ description = "Admin username on all VMs."
+}
+
+variable "admin_password" {
+ description = "Admin password on all VMs."
+}
diff --git a/examples/vnet-to-vnet-peering/README.md b/examples/vnet-to-vnet-peering/README.md
new file mode 100644
index 000000000000..36a90a46ffe8
--- /dev/null
+++ b/examples/vnet-to-vnet-peering/README.md
@@ -0,0 +1,24 @@
+# VNET to VNET Peering
+
+This template creates two VNETs in the same location, each containing a single subnet, and creates connections between them using VNET Peering.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
+
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![`terraform graph`](/examples/vnet-to-vnet-peering/graph.png)
diff --git a/examples/vnet-to-vnet-peering/main.tf b/examples/vnet-to-vnet-peering/main.tf
new file mode 100644
index 000000000000..6bdfb8a240e3
--- /dev/null
+++ b/examples/vnet-to-vnet-peering/main.tf
@@ -0,0 +1,56 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet1" {
+ name = "${var.resource_group}-vnet1"
+ location = "${var.location}"
+ address_space = ["10.0.0.0/24"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ subnet {
+ name = "subnet1"
+ address_prefix = "10.0.0.0/24"
+ }
+}
+
+resource "azurerm_virtual_network" "vnet2" {
+ name = "${var.resource_group}-vnet2"
+ location = "${var.location}"
+ address_space = ["192.168.0.0/24"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+
+ subnet {
+ name = "subnet1"
+ address_prefix = "192.168.0.0/24"
+ }
+}
+
+resource "azurerm_virtual_network_peering" "peer1" {
+ name = "vNet1-to-vNet2"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ virtual_network_name = "${azurerm_virtual_network.vnet1.name}"
+ remote_virtual_network_id = "${azurerm_virtual_network.vnet2.id}"
+ allow_virtual_network_access = true
+ allow_forwarded_traffic = false
+ allow_gateway_transit = false
+}
+
+resource "azurerm_virtual_network_peering" "peer2" {
+ name = "vNet2-to-vNet1"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ virtual_network_name = "${azurerm_virtual_network.vnet2.name}"
+ remote_virtual_network_id = "${azurerm_virtual_network.vnet1.id}"
+ allow_virtual_network_access = true
+ allow_forwarded_traffic = false
+ allow_gateway_transit = false
+ use_remote_gateways = false
+}
diff --git a/examples/vnet-to-vnet-peering/variables.tf b/examples/vnet-to-vnet-peering/variables.tf
new file mode 100644
index 000000000000..2701af343e0f
--- /dev/null
+++ b/examples/vnet-to-vnet-peering/variables.tf
@@ -0,0 +1,9 @@
+variable "resource_group" {
+ description = "The name of the resource group in which the virtual networks are created"
+ default = "myrg"
+}
+
+variable "location" {
+ description = "The location/region where the virtual networks are created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
diff --git a/examples/vnet-two-subnets/README.md b/examples/vnet-two-subnets/README.md
new file mode 100644
index 000000000000..d8c36ea6cf2c
--- /dev/null
+++ b/examples/vnet-two-subnets/README.md
@@ -0,0 +1,20 @@
+# Virtual Network with Two Subnets
+
+This template allows you to create a Virtual Network with two subnets.
+
+## main.tf
+The `main.tf` file contains the actual resources that will be deployed. It also contains the Azure Resource Group definition and any defined variables.
+
+## outputs.tf
+This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
+
+![graph](/examples/vnet-two-subnets/graph.png)
diff --git a/examples/vnet-two-subnets/main.tf b/examples/vnet-two-subnets/main.tf
new file mode 100644
index 000000000000..aee3593f3efa
--- /dev/null
+++ b/examples/vnet-two-subnets/main.tf
@@ -0,0 +1,32 @@
+# provider "azurerm" {
+# subscription_id = "REPLACE-WITH-YOUR-SUBSCRIPTION-ID"
+# client_id = "REPLACE-WITH-YOUR-CLIENT-ID"
+# client_secret = "REPLACE-WITH-YOUR-CLIENT-SECRET"
+# tenant_id = "REPLACE-WITH-YOUR-TENANT-ID"
+# }
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.resource_group}vnet"
+ location = "${var.location}"
+ address_space = ["10.0.0.0/16"]
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+resource "azurerm_subnet" "subnet1" {
+ name = "subnet1"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "10.0.0.0/24"
+}
+
+resource "azurerm_subnet" "subnet2" {
+ name = "subnet2"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ address_prefix = "10.0.1.0/24"
+}
diff --git a/examples/vnet-two-subnets/variables.tf b/examples/vnet-two-subnets/variables.tf
new file mode 100644
index 000000000000..8d5dd4131636
--- /dev/null
+++ b/examples/vnet-two-subnets/variables.tf
@@ -0,0 +1,8 @@
+variable "resource_group" {
+ description = "The name of the resource group in which to create the virtual network."
+}
+
+variable "location" {
+ description = "The location/region where the virtual network is created. Changing this forces a new resource to be created."
+ default = "southcentralus"
+}
diff --git a/examples/wordpress-mysql-replication/README.md b/examples/wordpress-mysql-replication/README.md
new file mode 100644
index 000000000000..b080397bc6e5
--- /dev/null
+++ b/examples/wordpress-mysql-replication/README.md
@@ -0,0 +1,41 @@
+# Deploys a WordPress web site backed by MySQL master-slave replication
+
+This Terraform template was based on [this](https://github.com/Azure/azure-quickstart-templates/tree/master/wordpress-mysql-replication) Azure Quickstart Template. Changes to the ARM template that may have occurred since the creation of this example may not be reflected here.
+
+This template deploys a WordPress site in Azure backed by MySQL replication with one master and one slave server. It has the following capabilities:
+
+- Installs and configures GTID based MySQL replication on CentOS 6
+- Deploys a load balancer in front of the 2 MySQL VMs
+- MySQL, SSH, and MySQL probe ports are exposed through the load balancer using Network Security Group rules.
+- WordPress accesses MySQL through the load balancer.
+- Configures an http based health probe for each MySQL instance that can be used to monitor MySQL health.
+- WordPress deployment starts immediately after MySQL deployment finishes.
+- Details about MySQL management, including failover, can be found [here](https://github.com/Azure/azure-quickstart-templates/tree/master/mysql-replication).
+
+If you would like to leverage an existing VNET, then please see the [documentation here](https://www.terraform.io/docs/import/index.html) to learn about importing existing resources into Terraform and bringing them under state management by this template. To import your existing VNET, you may use this command.
+
+```
+terraform import azurerm_virtual_network.testNetwork /subscriptions/&lt;subscription-id&gt;/resourceGroups/&lt;resource-group-name&gt;/providers/Microsoft.Network/virtualNetworks/&lt;virtual-network-name&gt;
+```
+
+## main.tf
+The `main.tf` file contains the resources necessary for the MySql replication deployment that will be created. It also contains the Azure Resource Group definition and any defined variables.
+
+## website.tf
+The `website.tf` contains an `azurerm_template_deployment` that will deploy the Wordpress website.
+
+## outputs.tf
+This data is output when `terraform apply` is called, and can be queried using the `terraform output` command.
+
+## provider.tf
+You may leave the provider block in the `main.tf`, as it is in this template, or you can create a file called `provider.tf` and add it to your `.gitignore` file.
+
+Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details). Please go [here](https://www.terraform.io/docs/providers/azurerm/) for full instructions on how to create this to populate your `provider.tf` file.
+
+## terraform.tfvars
+If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, Terraform automatically loads them to populate variables. We don't recommend saving usernames and passwords to version control, but you can create a local secret variables file and use the `-var-file` flag or the `.auto.tfvars` extension to load it.
+
+If you are committing this template to source control, please ensure that you add this file to your `.gitignore` file.
+
+## variables.tf
+The `variables.tf` file contains all of the input parameters that the user can specify when deploying this Terraform template.
diff --git a/examples/wordpress-mysql-replication/main.tf b/examples/wordpress-mysql-replication/main.tf
new file mode 100644
index 000000000000..a91933b1d171
--- /dev/null
+++ b/examples/wordpress-mysql-replication/main.tf
@@ -0,0 +1,244 @@
+# provider "azurerm" {
+# subscription_id = "${var.subscription_id}"
+# client_id = "${var.client_id}"
+# client_secret = "${var.client_secret}"
+# tenant_id = "${var.tenant_id}"
+# }
+
+# ********************** MYSQL REPLICATION ********************** #
+
+resource "azurerm_resource_group" "rg" {
+ name = "${var.resource_group}"
+ location = "${var.location}"
+}
+
+# ********************** VNET / SUBNET ********************** #
+resource "azurerm_virtual_network" "vnet" {
+ name = "${var.virtual_network_name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ address_space = ["${var.vnet_address_prefix}"]
+}
+
+resource "azurerm_subnet" "db_subnet" {
+ name = "${var.db_subnet_name}"
+ virtual_network_name = "${azurerm_virtual_network.vnet.name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_security_group_id = "${azurerm_network_security_group.nsg.id}"
+ address_prefix = "${var.db_subnet_address_prefix}"
+ depends_on = ["azurerm_virtual_network.vnet"]
+}
+
+# ********************** STORAGE ACCOUNTS ********************** #
+resource "azurerm_storage_account" "stor" {
+ name = "${var.unique_prefix}${var.storage_account_name}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ account_type = "${var.storage_account_type}"
+}
+
+# ********************** NETWORK SECURITY GROUP ********************** #
+resource "azurerm_network_security_group" "nsg" {
+ name = "${var.unique_prefix}-nsg"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+
+ security_rule {
+ name = "allow-ssh"
+ description = "Allow SSH"
+ priority = 100
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "22"
+ source_address_prefix = "Internet"
+ destination_address_prefix = "*"
+ }
+
+ security_rule {
+ name = "MySQL"
+ description = "MySQL"
+ priority = 110
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_range = "3306"
+ source_address_prefix = "*"
+ destination_address_prefix = "*"
+ }
+}
+
+# ********************** PUBLIC IP ADDRESSES ********************** #
+resource "azurerm_public_ip" "pip" {
+ name = "${var.public_ip_name}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ public_ip_address_allocation = "Static"
+ domain_name_label = "${var.dns_name}"
+}
+
+# ********************** AVAILABILITY SET ********************** #
+resource "azurerm_availability_set" "availability_set" {
+ name = "${var.dns_name}-set"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+}
+
+# ********************** NETWORK INTERFACES ********************** #
+resource "azurerm_network_interface" "nic" {
+ name = "${var.nic_name}${count.index}"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ network_security_group_id = "${azurerm_network_security_group.nsg.id}"
+ count = "${var.node_count}"
+ depends_on = ["azurerm_virtual_network.vnet", "azurerm_public_ip.pip", "azurerm_lb.lb"]
+
+ ip_configuration {
+ name = "ipconfig${count.index}"
+ subnet_id = "${azurerm_subnet.db_subnet.id}"
+ private_ip_address_allocation = "Static"
+ private_ip_address = "10.0.1.${count.index + 4}"
+ load_balancer_backend_address_pools_ids = ["${azurerm_lb_backend_address_pool.backend_pool.id}"]
+
+ load_balancer_inbound_nat_rules_ids = [
+ "${element(azurerm_lb_nat_rule.NatRule0.*.id, count.index)}",
+ "${element(azurerm_lb_nat_rule.MySQLNatRule0.*.id, count.index)}",
+ "${element(azurerm_lb_nat_rule.ProbeNatRule0.*.id, count.index)}",
+ ]
+ }
+}
+
+# ********************** LOAD BALANCER ********************** #
+resource "azurerm_lb" "lb" {
+ name = "${var.dns_name}-lb"
+ location = "${azurerm_resource_group.rg.location}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ depends_on = ["azurerm_public_ip.pip"]
+
+ frontend_ip_configuration {
+ name = "${var.dns_name}-sshIPCfg"
+ public_ip_address_id = "${azurerm_public_ip.pip.id}"
+ }
+}
+
+resource "azurerm_lb_backend_address_pool" "backend_pool" {
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ name = "${var.dns_name}-ilbBackendPool"
+}
+
+# ********************** LOAD BALANCER INBOUND NAT RULES ********************** #
+resource "azurerm_lb_nat_rule" "NatRule0" {
+ name = "${var.dns_name}-NatRule-${count.index}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ protocol = "tcp"
+ frontend_port = "6400${count.index + 1}"
+ backend_port = 22
+ frontend_ip_configuration_name = "${var.dns_name}-sshIPCfg"
+ count = "${var.node_count}"
+ depends_on = ["azurerm_lb.lb"]
+}
+
+resource "azurerm_lb_nat_rule" "MySQLNatRule0" {
+ name = "${var.dns_name}-MySQLNatRule-${count.index}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ protocol = "tcp"
+ frontend_port = "330${count.index + 6}"
+ backend_port = 3306
+ frontend_ip_configuration_name = "${var.dns_name}-sshIPCfg"
+ count = "${var.node_count}"
+ depends_on = ["azurerm_lb.lb"]
+}
+
+resource "azurerm_lb_nat_rule" "ProbeNatRule0" {
+ name = "${var.dns_name}-ProbeNatRule-${count.index}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ loadbalancer_id = "${azurerm_lb.lb.id}"
+ protocol = "tcp"
+ frontend_port = "920${count.index}"
+ backend_port = 9200
+ frontend_ip_configuration_name = "${var.dns_name}-sshIPCfg"
+ count = "${var.node_count}"
+ depends_on = ["azurerm_lb.lb"]
+}
+
+# ********************** VIRTUAL MACHINES ********************** #
+resource "azurerm_virtual_machine" "vm" {
+ name = "${var.dns_name}${count.index}"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ vm_size = "${var.vm_size}"
+ network_interface_ids = ["${element(azurerm_network_interface.nic.*.id, count.index)}"]
+ count = "${var.node_count}"
+ availability_set_id = "${azurerm_availability_set.availability_set.id}"
+ depends_on = ["azurerm_availability_set.availability_set", "azurerm_network_interface.nic", "azurerm_storage_account.stor"]
+
+ storage_image_reference {
+ publisher = "${var.image_publisher}"
+ offer = "${var.image_offer}"
+ sku = "${var.os_version}"
+ version = "latest"
+ }
+
+ storage_os_disk {
+ name = "osdisk${count.index}"
+ vhd_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.dns_name}${count.index}-osdisk.vhd"
+ create_option = "FromImage"
+ caching = "ReadWrite"
+ }
+
+ os_profile {
+ computer_name = "${var.dns_name}${count.index}"
+ admin_username = "${var.vm_admin_username}"
+ admin_password = "${var.vm_admin_password}"
+ }
+
+ storage_data_disk {
+ name = "datadisk1"
+ vhd_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.dns_name}${count.index}-datadisk1.vhd"
+ disk_size_gb = "1000"
+ create_option = "Empty"
+ lun = 0
+ }
+
+ storage_data_disk {
+ name = "datadisk2"
+ vhd_uri = "https://${azurerm_storage_account.stor.name}.blob.core.windows.net/vhds/${var.dns_name}${count.index}-datadisk2.vhd"
+ disk_size_gb = "1000"
+ create_option = "Empty"
+ lun = 1
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+}
+
+resource "azurerm_virtual_machine_extension" "setup_mysql" {
+ name = "${var.dns_name}-${count.index}-setupMySQL"
+ resource_group_name = "${azurerm_resource_group.rg.name}"
+ location = "${azurerm_resource_group.rg.location}"
+ virtual_machine_name = "${element(azurerm_virtual_machine.vm.*.name, count.index)}"
+ publisher = "Microsoft.Azure.Extensions"
+ type = "CustomScript"
+ type_handler_version = "2.0"
+ auto_upgrade_minor_version = true
+ count = "${var.node_count}"
+ depends_on = ["azurerm_virtual_machine.vm", "azurerm_lb_nat_rule.ProbeNatRule0"]
+
+ settings = < 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ return preparer.Prepare(&http.Request{})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (result RecordSet, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a record set from a DNS zone. This operation cannot be
+// undone.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). relativeRecordSetName is the name
+// of the record set, relative to the name of the zone. recordType is the type
+// of DNS record in this record set. Record sets of type SOA cannot be deleted
+// (they are deleted when the DNS zone is deleted). ifMatch is the etag of the
+// record set. Omit this value to always delete the current record set. Specify
+// the last-seen etag value to prevent accidentally deleting any concurrent
+// changes.
+func (client RecordSetsClient) Delete(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (result autorest.Response, err error) {
+ req, err := client.DeletePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client RecordSetsClient) DeletePreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recordType": autorest.Encode("path", recordType),
+ "relativeRecordSetName": relativeRecordSetName,
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare(&http.Request{})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get gets a record set.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). relativeRecordSetName is the name
+// of the record set, relative to the name of the zone. recordType is the type
+// of DNS record in this record set.
+func (client RecordSetsClient) Get(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (result RecordSet, err error) {
+ req, err := client.GetPreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client RecordSetsClient) GetPreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recordType": autorest.Encode("path", recordType),
+ "relativeRecordSetName": relativeRecordSetName,
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) GetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordSet, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByDNSZone lists all record sets in a DNS zone.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). top is the maximum number of
+// record sets to return. If not specified, returns up to 100 record sets.
+func (client RecordSetsClient) ListByDNSZone(resourceGroupName string, zoneName string, top *int32) (result RecordSetListResult, err error) {
+ req, err := client.ListByDNSZonePreparer(resourceGroupName, zoneName, top)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByDNSZoneSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByDNSZoneResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByDNSZonePreparer prepares the ListByDNSZone request.
+func (client RecordSetsClient) ListByDNSZonePreparer(resourceGroupName string, zoneName string, top *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/recordsets", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByDNSZoneSender sends the ListByDNSZone request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) ListByDNSZoneSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByDNSZoneResponder handles the response to the ListByDNSZone request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) ListByDNSZoneResponder(resp *http.Response) (result RecordSetListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByDNSZoneNextResults retrieves the next set of results, if any.
+func (client RecordSetsClient) ListByDNSZoneNextResults(lastResults RecordSetListResult) (result RecordSetListResult, err error) {
+ req, err := lastResults.RecordSetListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListByDNSZoneSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure sending next results request")
+ }
+
+ result, err = client.ListByDNSZoneResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure responding to next results request")
+ }
+
+ return
+}
+
+// ListByType lists the record sets of a specified type in a DNS zone.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). recordType is the type of record
+// sets to enumerate. top is the maximum number of record sets to return. If
+// not specified, returns up to 100 record sets.
+func (client RecordSetsClient) ListByType(resourceGroupName string, zoneName string, recordType RecordType, top *int32) (result RecordSetListResult, err error) {
+ req, err := client.ListByTypePreparer(resourceGroupName, zoneName, recordType, top)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByTypeSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByTypeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByTypePreparer prepares the ListByType request.
+func (client RecordSetsClient) ListByTypePreparer(resourceGroupName string, zoneName string, recordType RecordType, top *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recordType": autorest.Encode("path", recordType),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByTypeSender sends the ListByType request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) ListByTypeSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByTypeResponder handles the response to the ListByType request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result RecordSetListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByTypeNextResults retrieves the next set of results, if any.
+func (client RecordSetsClient) ListByTypeNextResults(lastResults RecordSetListResult) (result RecordSetListResult, err error) {
+ req, err := lastResults.RecordSetListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListByTypeSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending next results request")
+ }
+
+ result, err = client.ListByTypeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure responding to next results request")
+ }
+
+ return
+}
+
+// Update updates a record set within a DNS zone.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). relativeRecordSetName is the name
+// of the record set, relative to the name of the zone. recordType is the type
+// of DNS record in this record set. parameters is parameters supplied to the
+// Update operation. ifMatch is the etag of the record set. Omit this value to
+// always overwrite the current record set. Specify the last-seen etag value to
+// prevent accidentally overwriting concurrent changes.
+func (client RecordSetsClient) Update(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (result RecordSet, err error) {
+ req, err := client.UpdatePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.UpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.UpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client RecordSetsClient) UpdatePreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "recordType": autorest.Encode("path", recordType),
+ "relativeRecordSetName": relativeRecordSetName,
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare(&http.Request{})
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
+func (client RecordSetsClient) UpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// UpdateResponder handles the response to the Update request. The method always
+// closes the http.Response Body.
+func (client RecordSetsClient) UpdateResponder(resp *http.Response) (result RecordSet, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go
new file mode 100755
index 000000000000..054f065276c5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/version.go
@@ -0,0 +1,29 @@
+package dns
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/v10.0.2-beta arm-dns/2016-04-01"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return "v10.0.2-beta"
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go
new file mode 100755
index 000000000000..31ad0ba36f7b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go
@@ -0,0 +1,463 @@
+package dns
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "net/http"
+)
+
+// ZonesClient is the DNS Management Client.
+type ZonesClient struct {
+ ManagementClient
+}
+
+// NewZonesClient creates an instance of the ZonesClient client.
+func NewZonesClient(subscriptionID string) ZonesClient {
+ return NewZonesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewZonesClientWithBaseURI creates an instance of the ZonesClient client.
+func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClient {
+ return ZonesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a DNS zone. Does not modify DNS records
+// within the zone.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). parameters is parameters supplied
+// to the CreateOrUpdate operation. ifMatch is the etag of the DNS zone. Omit
+// this value to always overwrite the current zone. Specify the last-seen etag
+// value to prevent accidentally overwriting any concurrent changes.
+// ifNoneMatch is set to '*' to allow a new DNS zone to be created, but to
+// prevent updating an existing zone. Other values will be ignored.
+func (client ZonesClient) CreateOrUpdate(resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ZonesClient) CreateOrUpdatePreparer(resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ if len(ifNoneMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch)))
+ }
+ return preparer.Prepare(&http.Request{})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client ZonesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ZonesClient) CreateOrUpdateResponder(resp *http.Response) (result Zone, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be
+// deleted. This operation cannot be undone. This method may poll for
+// completion. Polling can be canceled by passing the cancel channel argument.
+// The channel will be used to cancel polling and any outstanding HTTP
+// requests.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot). ifMatch is the etag of the DNS
+// zone. Omit this value to always delete the current zone. Specify the
+// last-seen etag value to prevent accidentally deleting any concurrent
+// changes.
+func (client ZonesClient) Delete(resourceGroupName string, zoneName string, ifMatch string, cancel <-chan struct{}) (<-chan ZoneDeleteResult, <-chan error) {
+ resultChan := make(chan ZoneDeleteResult, 1)
+ errChan := make(chan error, 1)
+ go func() {
+ var err error
+ var result ZoneDeleteResult
+ defer func() {
+ resultChan <- result
+ errChan <- err
+ close(resultChan)
+ close(errChan)
+ }()
+ req, err := client.DeletePreparer(resourceGroupName, zoneName, ifMatch, cancel)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure responding to request")
+ }
+ }()
+ return resultChan, errChan
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ZonesClient) DeletePreparer(resourceGroupName string, zoneName string, ifMatch string, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ if len(ifMatch) > 0 {
+ preparer = autorest.DecoratePreparer(preparer,
+ autorest.WithHeader("If-Match", autorest.String(ifMatch)))
+ }
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ZonesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ZonesClient) DeleteResponder(resp *http.Response) (result ZoneDeleteResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Get gets a DNS zone. Retrieves the zone properties, but not the record sets
+// within the zone.
+//
+// resourceGroupName is the name of the resource group. zoneName is the name of
+// the DNS zone (without a terminating dot).
+func (client ZonesClient) Get(resourceGroupName string, zoneName string) (result Zone, err error) {
+ req, err := client.GetPreparer(resourceGroupName, zoneName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ZonesClient) GetPreparer(resourceGroupName string, zoneName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ "zoneName": autorest.Encode("path", zoneName),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ZonesClient) GetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ZonesClient) GetResponder(resp *http.Response) (result Zone, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists the DNS zones in all resource groups in a subscription.
+//
+// top is the maximum number of DNS zones to return. If not specified, returns
+// up to 100 zones.
+func (client ZonesClient) List(top *int32) (result ZoneListResult, err error) {
+ req, err := client.ListPreparer(top)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ZonesClient) ListPreparer(top *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ZonesClient) ListSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ZonesClient) ListResponder(resp *http.Response) (result ZoneListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client ZonesClient) ListNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) {
+ req, err := lastResults.ZoneListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending next results request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure responding to next results request")
+ }
+
+ return
+}
+
+// ListByResourceGroup lists the DNS zones within a resource group.
+//
+// resourceGroupName is the name of the resource group. top is the maximum
+// number of record sets to return. If not specified, returns up to 100 record
+// sets.
+func (client ZonesClient) ListByResourceGroup(resourceGroupName string, top *int32) (result ZoneListResult, err error) {
+ req, err := client.ListByResourceGroupPreparer(resourceGroupName, top)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ZonesClient) ListByResourceGroupPreparer(resourceGroupName string, top *int32) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2016-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+ if top != nil {
+ queryParameters["$top"] = autorest.Encode("query", *top)
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ZonesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ZonesClient) ListByResourceGroupResponder(resp *http.Response) (result ZoneListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroupNextResults retrieves the next set of results, if any.
+func (client ZonesClient) ListByResourceGroupNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) {
+ req, err := lastResults.ZoneListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending next results request")
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure responding to next results request")
+ }
+
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/client.go
new file mode 100755
index 000000000000..5c0752c01209
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/client.go
@@ -0,0 +1,53 @@
+// Package documentdb implements the Azure ARM Documentdb service API version
+// 2015-04-08.
+//
+// Azure DocumentDB Database Service Resource Provider REST API
+package documentdb
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+const (
+ // DefaultBaseURI is the default URI used for the service Documentdb
+ DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Documentdb.
+type ManagementClient struct {
+ autorest.Client
+ BaseURI string
+ SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+ return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+ return ManagementClient{
+ Client: autorest.NewClientWithUserAgent(UserAgent()),
+ BaseURI: baseURI,
+ SubscriptionID: subscriptionID,
+ }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/databaseaccounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/databaseaccounts.go
new file mode 100755
index 000000000000..7d4480742267
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/databaseaccounts.go
@@ -0,0 +1,1069 @@
+package documentdb
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/validation"
+ "net/http"
+)
+
+// DatabaseAccountsClient is the azure DocumentDB Database Service Resource
+// Provider REST API client. It embeds ManagementClient for the base URI,
+// subscription ID, and the autorest HTTP pipeline.
+type DatabaseAccountsClient struct {
+ ManagementClient
+}
+
+// NewDatabaseAccountsClient creates an instance of the DatabaseAccountsClient
+// client using the default ARM endpoint.
+func NewDatabaseAccountsClient(subscriptionID string) DatabaseAccountsClient {
+ return NewDatabaseAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDatabaseAccountsClientWithBaseURI creates an instance of the
+// DatabaseAccountsClient client against a caller-supplied endpoint.
+func NewDatabaseAccountsClientWithBaseURI(baseURI string, subscriptionID string) DatabaseAccountsClient {
+ return DatabaseAccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CheckNameExists checks whether the Azure DocumentDB account name already
+// exists. A valid account name may contain only lowercase letters, numbers,
+// and the '-' character, and must be between 3 and 50 characters.
+//
+// accountName is documentDB database account name.
+func (client DatabaseAccountsClient) CheckNameExists(accountName string) (result autorest.Response, err error) {
+ // Enforce the 3-50 character length constraint client-side before any
+ // network round trip.
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists")
+ }
+
+ req, err := client.CheckNameExistsPreparer(accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CheckNameExistsSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CheckNameExistsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CheckNameExists", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CheckNameExistsPreparer prepares the CheckNameExists request.
+// Note: this is a HEAD request with no body; the answer is carried entirely
+// in the response status code.
+func (client DatabaseAccountsClient) CheckNameExistsPreparer(accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsHead(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/providers/Microsoft.DocumentDB/databaseAccountNames/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// CheckNameExistsSender sends the CheckNameExists request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) CheckNameExistsSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// CheckNameExistsResponder handles the response to the CheckNameExists request. The method always
+// closes the http.Response Body.
+// Both 200 and 404 are accepted as non-error outcomes; presumably 200 means
+// the name exists and 404 means it is available — callers inspect
+// result.Response.StatusCode to distinguish. TODO(review): confirm against
+// the service documentation.
+func (client DatabaseAccountsClient) CheckNameExistsResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// CreateOrUpdate creates or updates an Azure DocumentDB database account. This
+// method may poll for completion. Polling can be canceled by passing the
+// cancel channel argument. The channel will be used to cancel polling and any
+// outstanding HTTP requests.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name. createUpdateParameters is the parameters
+// to provide for the current database account.
+//
+// Channel contract: on validation failure both channels are closed after one
+// error is delivered; otherwise a background goroutine always delivers
+// exactly one DatabaseAccount and one error value (possibly nil), then
+// closes both channels.
+func (client DatabaseAccountsClient) CreateOrUpdate(resourceGroupName string, accountName string, createUpdateParameters DatabaseAccountCreateUpdateParameters, cancel <-chan struct{}) (<-chan DatabaseAccount, <-chan error) {
+ // Buffer of 1 lets the goroutine send its single value and exit even if
+ // the caller never reads.
+ resultChan := make(chan DatabaseAccount, 1)
+ errChan := make(chan error, 1)
+ // Client-side validation of names and of the consistency-policy bounds
+ // (MaxStalenessPrefix 1..2147483647, MaxIntervalInSeconds 1..100) happens
+ // synchronously, before the goroutine is started.
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+ {TargetValue: createUpdateParameters,
+ Constraints: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxStalenessPrefix", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxStalenessPrefix", Name: validation.InclusiveMaximum, Rule: 2147483647, Chain: nil},
+ {Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxStalenessPrefix", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ {Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxIntervalInSeconds", Name: validation.Null, Rule: false,
+ Chain: []validation.Constraint{{Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxIntervalInSeconds", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil},
+ {Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.ConsistencyPolicy.MaxIntervalInSeconds", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
+ }},
+ }},
+ {Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.Locations", Name: validation.Null, Rule: true, Chain: nil},
+ {Target: "createUpdateParameters.DatabaseAccountCreateUpdateProperties.DatabaseAccountOfferType", Name: validation.Null, Rule: true, Chain: nil},
+ }}}}}); err != nil {
+ errChan <- validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "CreateOrUpdate")
+ close(errChan)
+ close(resultChan)
+ return resultChan, errChan
+ }
+
+ go func() {
+ var err error
+ var result DatabaseAccount
+ defer func() {
+ // The deferred sends guarantee the one-value-per-channel contract
+ // regardless of which step below returned early.
+ resultChan <- result
+ errChan <- err
+ close(resultChan)
+ close(errChan)
+ }()
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, accountName, createUpdateParameters, cancel)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateOrUpdate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateOrUpdate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "CreateOrUpdate", resp, "Failure responding to request")
+ }
+ }()
+ return resultChan, errChan
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+// The cancel channel is wired into http.Request.Cancel so polling and
+// in-flight requests can be aborted.
+func (client DatabaseAccountsClient) CreateOrUpdatePreparer(resourceGroupName string, accountName string, createUpdateParameters DatabaseAccountCreateUpdateParameters, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(createUpdateParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+// DoPollForAsynchronous blocks, re-polling at client.PollingDelay, until the
+// long-running ARM operation completes.
+func (client DatabaseAccountsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) CreateOrUpdateResponder(resp *http.Response) (result DatabaseAccount, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete deletes an existing Azure DocumentDB database account. This method
+// may poll for completion. Polling can be canceled by passing the cancel
+// channel argument. The channel will be used to cancel polling and any
+// outstanding HTTP requests.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name.
+//
+// Channel contract: exactly one autorest.Response and one error value
+// (possibly nil) are delivered before both channels are closed.
+func (client DatabaseAccountsClient) Delete(resourceGroupName string, accountName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
+ // Buffered so the worker goroutine never blocks on an absent reader.
+ resultChan := make(chan autorest.Response, 1)
+ errChan := make(chan error, 1)
+ // Synchronous client-side validation; failures short-circuit before the
+ // goroutine is spawned.
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ errChan <- validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "Delete")
+ close(errChan)
+ close(resultChan)
+ return resultChan, errChan
+ }
+
+ go func() {
+ var err error
+ var result autorest.Response
+ defer func() {
+ resultChan <- result
+ errChan <- err
+ close(resultChan)
+ close(errChan)
+ }()
+ req, err := client.DeletePreparer(resourceGroupName, accountName, cancel)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Delete", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Delete", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Delete", resp, "Failure responding to request")
+ }
+ }()
+ return resultChan, errChan
+}
+
+// DeletePreparer prepares the Delete request.
+func (client DatabaseAccountsClient) DeletePreparer(resourceGroupName string, accountName string, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+// Polls the long-running delete operation at client.PollingDelay intervals.
+func (client DatabaseAccountsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+// 200, 202 and 204 are all accepted as successful deletion outcomes.
+func (client DatabaseAccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// FailoverPriorityChange changes the failover priority for the Azure
+// DocumentDB database account. A failover priority of 0 indicates a write
+// region. The maximum value for a failover priority = (total number of regions
+// - 1). Failover priority values must be unique for each of the regions in
+// which the database account exists. This method may poll for completion.
+// Polling can be canceled by passing the cancel channel argument. The channel
+// will be used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name. failoverParameters is the new failover
+// policies for the database account.
+//
+// Channel contract: exactly one autorest.Response and one error value
+// (possibly nil) are delivered before both channels are closed.
+func (client DatabaseAccountsClient) FailoverPriorityChange(resourceGroupName string, accountName string, failoverParameters FailoverPolicies, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
+ // Buffered so the worker goroutine never blocks on an absent reader.
+ resultChan := make(chan autorest.Response, 1)
+ errChan := make(chan error, 1)
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ errChan <- validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "FailoverPriorityChange")
+ close(errChan)
+ close(resultChan)
+ return resultChan, errChan
+ }
+
+ go func() {
+ var err error
+ var result autorest.Response
+ defer func() {
+ resultChan <- result
+ errChan <- err
+ close(resultChan)
+ close(errChan)
+ }()
+ req, err := client.FailoverPriorityChangePreparer(resourceGroupName, accountName, failoverParameters, cancel)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "FailoverPriorityChange", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.FailoverPriorityChangeSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "FailoverPriorityChange", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.FailoverPriorityChangeResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "FailoverPriorityChange", resp, "Failure responding to request")
+ }
+ }()
+ return resultChan, errChan
+}
+
+// FailoverPriorityChangePreparer prepares the FailoverPriorityChange request.
+// POSTs the failover policies as a JSON body to the account's
+// failoverPriorityChange action endpoint.
+func (client DatabaseAccountsClient) FailoverPriorityChangePreparer(resourceGroupName string, accountName string, failoverParameters FailoverPolicies, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/failoverPriorityChange", pathParameters),
+ autorest.WithJSON(failoverParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// FailoverPriorityChangeSender sends the FailoverPriorityChange request. The method will close the
+// http.Response Body if it receives an error.
+// Polls the long-running operation at client.PollingDelay intervals.
+func (client DatabaseAccountsClient) FailoverPriorityChangeSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// FailoverPriorityChangeResponder handles the response to the FailoverPriorityChange request. The method always
+// closes the http.Response Body.
+// 200, 202 and 204 are all treated as success; no body is unmarshalled.
+func (client DatabaseAccountsClient) FailoverPriorityChangeResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get retrieves the properties of an existing Azure DocumentDB database
+// account. This is a synchronous (non-polling) operation.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name.
+func (client DatabaseAccountsClient) Get(resourceGroupName string, accountName string) (result DatabaseAccount, err error) {
+ // Validate both names client-side before the request is issued.
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "Get")
+ }
+
+ req, err := client.GetPreparer(resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Get", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Get", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Get", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client DatabaseAccountsClient) GetPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) GetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) GetResponder(resp *http.Response) (result DatabaseAccount, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List lists all the Azure DocumentDB database accounts available under the
+// subscription. Synchronous; no client-side validation is required since the
+// only input is the client's own SubscriptionID.
+func (client DatabaseAccountsClient) List() (result DatabaseAccountsListResult, err error) {
+ req, err := client.ListPreparer()
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "List", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "List", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "List", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client DatabaseAccountsClient) ListPreparer() (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DocumentDB/databaseAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) ListSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) ListResponder(resp *http.Response) (result DatabaseAccountsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroup lists all the Azure DocumentDB database accounts
+// available under the given resource group. Synchronous.
+//
+// resourceGroupName is name of an Azure resource group.
+func (client DatabaseAccountsClient) ListByResourceGroup(resourceGroupName string) (result DatabaseAccountsListResult, err error) {
+ // Validate the resource group name client-side before the request.
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+ return result, validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup")
+ }
+
+ req, err := client.ListByResourceGroupPreparer(resourceGroupName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListByResourceGroup", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client DatabaseAccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) ListByResourceGroupResponder(resp *http.Response) (result DatabaseAccountsListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListConnectionStrings lists the connection strings for the specified Azure
+// DocumentDB database account. Synchronous.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name.
+func (client DatabaseAccountsClient) ListConnectionStrings(resourceGroupName string, accountName string) (result DatabaseAccountListConnectionStringsResult, err error) {
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings")
+ }
+
+ req, err := client.ListConnectionStringsPreparer(resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListConnectionStringsSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListConnectionStringsResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListConnectionStrings", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListConnectionStringsPreparer prepares the ListConnectionStrings request.
+// Note this is a POST with no body — the standard ARM "list*" action shape
+// (secret-returning reads are POSTs rather than GETs).
+func (client DatabaseAccountsClient) ListConnectionStringsPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listConnectionStrings", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListConnectionStringsSender sends the ListConnectionStrings request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) ListConnectionStringsSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListConnectionStringsResponder handles the response to the ListConnectionStrings request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) ListConnectionStringsResponder(resp *http.Response) (result DatabaseAccountListConnectionStringsResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListKeys lists the access keys for the specified Azure DocumentDB database
+// account.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name.
+func (client DatabaseAccountsClient) ListKeys(resourceGroupName string, accountName string) (result DatabaseAccountListKeysResult, err error) {
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "ListKeys")
+ }
+
+ req, err := client.ListKeysPreparer(resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client DatabaseAccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/listKeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) ListKeysResponder(resp *http.Response) (result DatabaseAccountListKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListReadOnlyKeys lists the read-only access keys for the specified Azure
+// DocumentDB database account.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name.
+func (client DatabaseAccountsClient) ListReadOnlyKeys(resourceGroupName string, accountName string) (result DatabaseAccountListReadOnlyKeysResult, err error) {
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ return result, validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys")
+ }
+
+ req, err := client.ListReadOnlyKeysPreparer(resourceGroupName, accountName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.ListReadOnlyKeysSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.ListReadOnlyKeysResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "ListReadOnlyKeys", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// ListReadOnlyKeysPreparer prepares the ListReadOnlyKeys request.
+func (client DatabaseAccountsClient) ListReadOnlyKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/readonlykeys", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{})
+}
+
+// ListReadOnlyKeysSender sends the ListReadOnlyKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) ListReadOnlyKeysSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req)
+}
+
+// ListReadOnlyKeysResponder handles the response to the ListReadOnlyKeys request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) ListReadOnlyKeysResponder(resp *http.Response) (result DatabaseAccountListReadOnlyKeysResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Patch patches the properties of an existing Azure DocumentDB database
+// account. This method may poll for completion. Polling can be canceled by
+// passing the cancel channel argument. The channel will be used to cancel
+// polling and any outstanding HTTP requests.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name. updateParameters is the tags parameter to
+// patch for the current database account.
+func (client DatabaseAccountsClient) Patch(resourceGroupName string, accountName string, updateParameters DatabaseAccountPatchParameters, cancel <-chan struct{}) (<-chan DatabaseAccount, <-chan error) {
+ resultChan := make(chan DatabaseAccount, 1)
+ errChan := make(chan error, 1)
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ errChan <- validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "Patch")
+ close(errChan)
+ close(resultChan)
+ return resultChan, errChan
+ }
+
+ go func() {
+ var err error
+ var result DatabaseAccount
+ defer func() {
+ resultChan <- result
+ errChan <- err
+ close(resultChan)
+ close(errChan)
+ }()
+ req, err := client.PatchPreparer(resourceGroupName, accountName, updateParameters, cancel)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Patch", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.PatchSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Patch", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.PatchResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "Patch", resp, "Failure responding to request")
+ }
+ }()
+ return resultChan, errChan
+}
+
+// PatchPreparer prepares the Patch request.
+func (client DatabaseAccountsClient) PatchPreparer(resourceGroupName string, accountName string, updateParameters DatabaseAccountPatchParameters, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPatch(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}", pathParameters),
+ autorest.WithJSON(updateParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) PatchSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) PatchResponder(resp *http.Response) (result DatabaseAccount, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// RegenerateKey regenerates an access key for the specified Azure DocumentDB
+// database account. This method may poll for completion. Polling can be
+// canceled by passing the cancel channel argument. The channel will be used to
+// cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is name of an Azure resource group. accountName is
+// documentDB database account name. keyToRegenerate is the name of the key to
+// regenerate.
+func (client DatabaseAccountsClient) RegenerateKey(resourceGroupName string, accountName string, keyToRegenerate DatabaseAccountRegenerateKeyParameters, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
+ resultChan := make(chan autorest.Response, 1)
+ errChan := make(chan error, 1)
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+ {TargetValue: accountName,
+ Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
+ errChan <- validation.NewErrorWithValidationError(err, "documentdb.DatabaseAccountsClient", "RegenerateKey")
+ close(errChan)
+ close(resultChan)
+ return resultChan, errChan
+ }
+
+ go func() {
+ var err error
+ var result autorest.Response
+ defer func() {
+ resultChan <- result
+ errChan <- err
+ close(resultChan)
+ close(errChan)
+ }()
+ req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, keyToRegenerate, cancel)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "RegenerateKey", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.RegenerateKeySender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "RegenerateKey", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.RegenerateKeyResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "documentdb.DatabaseAccountsClient", "RegenerateKey", resp, "Failure responding to request")
+ }
+ }()
+ return resultChan, errChan
+}
+
+// RegenerateKeyPreparer prepares the RegenerateKey request.
+func (client DatabaseAccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, keyToRegenerate DatabaseAccountRegenerateKeyParameters, cancel <-chan struct{}) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "accountName": autorest.Encode("path", accountName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2015-04-08"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsJSON(),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/regenerateKey", pathParameters),
+ autorest.WithJSON(keyToRegenerate),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// RegenerateKeySender sends the RegenerateKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client DatabaseAccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client,
+ req,
+ azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
+// closes the http.Response Body.
+func (client DatabaseAccountsClient) RegenerateKeyResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/models.go
new file mode 100755
index 000000000000..cc2d6de0384e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/models.go
@@ -0,0 +1,210 @@
+package documentdb
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// DatabaseAccountKind enumerates the values for database account kind.
+type DatabaseAccountKind string
+
+const (
+ // GlobalDocumentDB specifies the global document db state for database
+ // account kind.
+ GlobalDocumentDB DatabaseAccountKind = "GlobalDocumentDB"
+ // MongoDB specifies the mongo db state for database account kind.
+ MongoDB DatabaseAccountKind = "MongoDB"
+ // Parse specifies the parse state for database account kind.
+ Parse DatabaseAccountKind = "Parse"
+)
+
+// DatabaseAccountOfferType enumerates the values for database account offer
+// type.
+type DatabaseAccountOfferType string
+
+const (
+ // Standard specifies the standard state for database account offer type.
+ Standard DatabaseAccountOfferType = "Standard"
+)
+
+// DefaultConsistencyLevel enumerates the values for default consistency level.
+type DefaultConsistencyLevel string
+
+const (
+ // BoundedStaleness specifies the bounded staleness state for default
+ // consistency level.
+ BoundedStaleness DefaultConsistencyLevel = "BoundedStaleness"
+ // Eventual specifies the eventual state for default consistency level.
+ Eventual DefaultConsistencyLevel = "Eventual"
+ // Session specifies the session state for default consistency level.
+ Session DefaultConsistencyLevel = "Session"
+ // Strong specifies the strong state for default consistency level.
+ Strong DefaultConsistencyLevel = "Strong"
+)
+
+// KeyKind enumerates the values for key kind.
+type KeyKind string
+
+const (
+ // Primary specifies the primary state for key kind.
+ Primary KeyKind = "primary"
+ // PrimaryReadonly specifies the primary readonly state for key kind.
+ PrimaryReadonly KeyKind = "primaryReadonly"
+ // Secondary specifies the secondary state for key kind.
+ Secondary KeyKind = "secondary"
+ // SecondaryReadonly specifies the secondary readonly state for key kind.
+ SecondaryReadonly KeyKind = "secondaryReadonly"
+)
+
+// ConsistencyPolicy is the consistency policy for the DocumentDB database
+// account.
+type ConsistencyPolicy struct {
+ DefaultConsistencyLevel DefaultConsistencyLevel `json:"defaultConsistencyLevel,omitempty"`
+ MaxStalenessPrefix *int64 `json:"maxStalenessPrefix,omitempty"`
+ MaxIntervalInSeconds *int32 `json:"maxIntervalInSeconds,omitempty"`
+}
+
+// DatabaseAccount is a DocumentDB database account.
+type DatabaseAccount struct {
+ autorest.Response `json:"-"`
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ Kind DatabaseAccountKind `json:"kind,omitempty"`
+ *DatabaseAccountProperties `json:"properties,omitempty"`
+}
+
+// DatabaseAccountConnectionString is connection string for the DocumentDB
+// account
+type DatabaseAccountConnectionString struct {
+ ConnectionString *string `json:"connectionString,omitempty"`
+ Description *string `json:"description,omitempty"`
+}
+
+// DatabaseAccountCreateUpdateParameters is parameters to create and update
+// DocumentDB database accounts.
+type DatabaseAccountCreateUpdateParameters struct {
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+ Kind DatabaseAccountKind `json:"kind,omitempty"`
+ *DatabaseAccountCreateUpdateProperties `json:"properties,omitempty"`
+}
+
+// DatabaseAccountCreateUpdateProperties is properties to create and update
+// Azure DocumentDB database accounts.
+type DatabaseAccountCreateUpdateProperties struct {
+ ConsistencyPolicy *ConsistencyPolicy `json:"consistencyPolicy,omitempty"`
+ Locations *[]Location `json:"locations,omitempty"`
+ DatabaseAccountOfferType *string `json:"databaseAccountOfferType,omitempty"`
+ IPRangeFilter *string `json:"ipRangeFilter,omitempty"`
+}
+
+// DatabaseAccountListConnectionStringsResult is the connection strings for the
+// given database account.
+type DatabaseAccountListConnectionStringsResult struct {
+ autorest.Response `json:"-"`
+ ConnectionStrings *[]DatabaseAccountConnectionString `json:"connectionStrings,omitempty"`
+}
+
+// DatabaseAccountListKeysResult is the access keys for the given database
+// account.
+type DatabaseAccountListKeysResult struct {
+ autorest.Response `json:"-"`
+ PrimaryMasterKey *string `json:"primaryMasterKey,omitempty"`
+ SecondaryMasterKey *string `json:"secondaryMasterKey,omitempty"`
+ *DatabaseAccountListReadOnlyKeysResult `json:"properties,omitempty"`
+}
+
+// DatabaseAccountListReadOnlyKeysResult is the read-only access keys for the
+// given database account.
+type DatabaseAccountListReadOnlyKeysResult struct {
+ autorest.Response `json:"-"`
+ PrimaryReadonlyMasterKey *string `json:"primaryReadonlyMasterKey,omitempty"`
+ SecondaryReadonlyMasterKey *string `json:"secondaryReadonlyMasterKey,omitempty"`
+}
+
+// DatabaseAccountPatchParameters is parameters for patching Azure DocumentDB
+// database account properties.
+type DatabaseAccountPatchParameters struct {
+ Tags *map[string]*string `json:"tags,omitempty"`
+}
+
+// DatabaseAccountProperties is properties for the database account.
+type DatabaseAccountProperties struct {
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ DocumentEndpoint *string `json:"documentEndpoint,omitempty"`
+ DatabaseAccountOfferType DatabaseAccountOfferType `json:"databaseAccountOfferType,omitempty"`
+ IPRangeFilter *string `json:"ipRangeFilter,omitempty"`
+ ConsistencyPolicy *ConsistencyPolicy `json:"consistencyPolicy,omitempty"`
+ WriteLocations *[]Location `json:"writeLocations,omitempty"`
+ ReadLocations *[]Location `json:"readLocations,omitempty"`
+ FailoverPolicies *[]FailoverPolicy `json:"failoverPolicies,omitempty"`
+}
+
+// DatabaseAccountRegenerateKeyParameters is parameters to regenerate the keys
+// within the database account.
+type DatabaseAccountRegenerateKeyParameters struct {
+ KeyKind KeyKind `json:"keyKind,omitempty"`
+}
+
+// DatabaseAccountsListResult is the List operation response, that contains the
+// database accounts and their properties.
+type DatabaseAccountsListResult struct {
+ autorest.Response `json:"-"`
+ Value *[]DatabaseAccount `json:"value,omitempty"`
+}
+
+// FailoverPolicies is the list of new failover policies for the failover
+// priority change.
+type FailoverPolicies struct {
+ FailoverPolicies *[]FailoverPolicy `json:"failoverPolicies,omitempty"`
+}
+
+// FailoverPolicy is the failover policy for a given region of a database
+// account.
+type FailoverPolicy struct {
+ ID *string `json:"id,omitempty"`
+ LocationName *string `json:"locationName,omitempty"`
+ FailoverPriority *int32 `json:"failoverPriority,omitempty"`
+}
+
+// Location is a region in which the Azure DocumentDB database account is
+// deployed.
+type Location struct {
+ ID *string `json:"id,omitempty"`
+ LocationName *string `json:"locationName,omitempty"`
+ DocumentEndpoint *string `json:"documentEndpoint,omitempty"`
+ ProvisioningState *string `json:"provisioningState,omitempty"`
+ FailoverPriority *int32 `json:"failoverPriority,omitempty"`
+}
+
+// Resource is a database account resource.
+type Resource struct {
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Location *string `json:"location,omitempty"`
+ Tags *map[string]*string `json:"tags,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/version.go
new file mode 100755
index 000000000000..dbbd4d7b1f9a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/documentdb/version.go
@@ -0,0 +1,29 @@
+package documentdb
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+ return "Azure-SDK-For-Go/v10.0.2-beta arm-documentdb/2015-04-08"
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+ return "v10.0.2-beta"
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index c2a730060651..9bab32dd127d 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -1,7 +1,15 @@
{
"comment": "",
- "ignore": "appengine test github.com/hashicorp/nomad/",
+ "ignore": "test",
"package": [
+ {
+ "checksumSHA1": "Beu5GwttkjDfz+YZ4q5L15R10Z8=",
+ "path": "github.com/Azure/azure-sdk-for-go/arm/appinsights",
+ "revision": "5841475edc7c8725d79885d635aa8956f97fdf0e",
+ "revisionTime": "2017-05-10T22:14:13Z",
+ "version": "=v10.0.2-beta",
+ "versionExact": "v10.0.2-beta"
+ },
{
"checksumSHA1": "cmMpbgzcc+qBJOOO80ZmgLdij5I=",
"path": "github.com/Azure/azure-sdk-for-go/arm/cdn",
@@ -42,6 +50,23 @@
"version": "v10.0.2-beta",
"versionExact": "v10.0.2-beta"
},
+ {
+ "checksumSHA1": "wQBiO1nFX8II54iaQW2vJ7iBcuI=",
+ "path": "github.com/Azure/azure-sdk-for-go/arm/documentdb",
+ "revision": "5841475edc7c8725d79885d635aa8956f97fdf0e",
+ "revisionTime": "2017-05-10T22:14:13Z",
+ "version": "=v10.0.2-beta",
+ "versionExact": "v10.0.2-beta"
+
+ },
+ {
+ "checksumSHA1": "RF2C9ir2cOlIFnq/hsJJaBSayJk=",
+ "path": "github.com/Azure/azure-sdk-for-go/arm/dns",
+ "revision": "5841475edc7c8725d79885d635aa8956f97fdf0e",
+ "revisionTime": "2017-05-10T22:14:13Z",
+ "version": "v10.0.2-beta",
+ "versionExact": "v10.0.2-beta"
+ },
{
"checksumSHA1": "eautqQaMqPwrTLVl81qpTSVtxI8=",
"path": "github.com/Azure/azure-sdk-for-go/arm/eventhub",
diff --git a/website/azurerm.erb b/website/azurerm.erb
index 95c5b39791cf..1dc68166ff3c 100644
--- a/website/azurerm.erb
+++ b/website/azurerm.erb
@@ -18,13 +18,18 @@
azurerm_client_config
- >
- azurerm_resource_group
+ >
+ azurerm_managed_disk
-
+
>
azurerm_public_ip
+
+ >
+ azurerm_resource_group
+
+
@@ -37,6 +42,17 @@
+ >
+ Application Insights Resources
+
+
+
>
CDN Resources
+ >
+ CosmosDB (DocumentDB) Resources
+
+
+
>
DNS Resources
+ >
+ azurerm_dns_ptr_record
+
+
>
azurerm_dns_srv_record
@@ -178,12 +209,16 @@
Network Resources
@@ -257,6 +288,10 @@
azurerm_servicebus_namespace
+ >
+ azurerm_servicebus_queue
+
+
>
azurerm_servicebus_subscription
@@ -330,8 +365,6 @@
-
-
>
Virtual Machine Resources
diff --git a/website/docs/d/managed_disk.html.markdown b/website/docs/d/managed_disk.html.markdown
new file mode 100644
index 000000000000..e154438fa0be
--- /dev/null
+++ b/website/docs/d/managed_disk.html.markdown
@@ -0,0 +1,113 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_managed_disk"
+sidebar_current: "docs-azurerm-datasource-managed-disk"
+description: |-
+ Get information about the specified managed disk.
+---
+
+# azurerm\_managed\_disk
+
+Use this data source to access the properties of an existing Azure Managed Disk.
+
+## Example Usage
+
+```hcl
+data "azurerm_managed_disk" "datasourcemd" {
+ name = "testManagedDisk"
+ resource_group_name = "acctestRG"
+}
+
+resource "azurerm_virtual_network" "test" {
+ name = "acctvn"
+ address_space = ["10.0.0.0/16"]
+ location = "West US 2"
+ resource_group_name = "acctestRG"
+}
+
+resource "azurerm_subnet" "test" {
+ name = "acctsub"
+ resource_group_name = "acctestRG"
+ virtual_network_name = "${azurerm_virtual_network.test.name}"
+ address_prefix = "10.0.2.0/24"
+}
+
+resource "azurerm_network_interface" "test" {
+ name = "acctni"
+ location = "West US 2"
+ resource_group_name = "acctestRG"
+
+ ip_configuration {
+ name = "testconfiguration1"
+ subnet_id = "${azurerm_subnet.test.id}"
+ private_ip_address_allocation = "dynamic"
+ }
+}
+
+resource "azurerm_virtual_machine" "test" {
+ name = "acctvm"
+ location = "West US 2"
+ resource_group_name = "acctestRG"
+ network_interface_ids = ["${azurerm_network_interface.test.id}"]
+ vm_size = "Standard_DS1_v2"
+
+ storage_image_reference {
+ publisher = "Canonical"
+ offer = "UbuntuServer"
+ sku = "14.04.2-LTS"
+ version = "latest"
+ }
+
+ storage_os_disk {
+ name = "myosdisk1"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "Standard_LRS"
+ }
+
+ storage_data_disk {
+ name = "datadisk_new"
+ managed_disk_type = "Standard_LRS"
+ create_option = "Empty"
+ lun = 0
+ disk_size_gb = "1023"
+ }
+
+ storage_data_disk {
+ name = "${data.azurerm_managed_disk.datasourcemd.name}"
+ managed_disk_id = "${data.azurerm_managed_disk.datasourcemd.id}"
+ create_option = "Attach"
+ lun = 1
+ disk_size_gb = "${data.azurerm_managed_disk.datasourcemd.disk_size_gb}"
+ }
+
+ os_profile {
+ computer_name = "hostname"
+ admin_username = "testadmin"
+ admin_password = "Password1234!"
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = false
+ }
+
+ tags {
+ environment = "staging"
+ }
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) Specifies the name of the Managed Disk.
+* `resource_group_name` - (Required) Specifies the name of the resource group.
+
+
+## Attributes Reference
+
+* `storage_account_type` - The storage account type for the managed disk.
+* `source_uri` - The source URI for the managed disk.
+* `source_resource_id` - ID of an existing managed disk that the current resource was created from.
+* `os_type` - The operating system for the managed disk. Valid values are `Linux` or `Windows`.
+* `disk_size_gb` - The size of the managed disk in gigabytes.
+* `tags` - A mapping of tags assigned to the resource.
diff --git a/website/docs/r/application_insights.html.markdown b/website/docs/r/application_insights.html.markdown
new file mode 100644
index 000000000000..c02e604acf5a
--- /dev/null
+++ b/website/docs/r/application_insights.html.markdown
@@ -0,0 +1,62 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_application_insights"
+sidebar_current: "docs-azurerm-resource-application-insights"
+description: |-
+ Create an Application Insights component.
+---
+
+# azurerm\_application\_insights
+
+Create an Application Insights component.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "test" {
+ name = "api-rg-pro"
+ location = "West Europe"
+}
+
+resource "azurerm_application_insights" "test" {
+ name = "api-appinsights-pro"
+ location = "West Europe"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ application_type = "Web"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name of the Application Insights component. Changing this forces a
+ new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+ create the Application Insights component.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+
+* `application_type` - (Required) Specifies the type of Application Insights to create. Valid values are `Web` and `Other`.
+
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ID of the Application Insights component.
+
+* `app_id` - The App ID associated with this Application Insights component.
+
+* `instrumentation_key` - The Instrumentation Key for this Application Insights component.
+
+
+## Import
+
+Application Insights instances can be imported using the `resource id`, e.g.
+
+```
+terraform import azurerm_application_insights.instance1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.insights/components/instance1
+```
diff --git a/website/docs/r/cdn_endpoint.html.markdown b/website/docs/r/cdn_endpoint.html.markdown
index 0c64e24ec3b6..7e08922cab78 100644
--- a/website/docs/r/cdn_endpoint.html.markdown
+++ b/website/docs/r/cdn_endpoint.html.markdown
@@ -75,7 +75,7 @@ The `origin` block supports:
* `name` - (Required) The name of the origin. This is an arbitrary value. However, this value needs to be unique under endpoint.
-* `host_name` - (Required) A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address.
+* `host_name` - (Required) A string that determines the hostname/IP address of the origin server. This string can be a domain name, Storage Account endpoint, Web App endpoint, IPv4 address or IPv6 address.
* `http_port` - (Optional) The HTTP port of the origin. Defaults to null. When null, 80 will be used for HTTP.
@@ -93,4 +93,4 @@ CDN Endpoints can be imported using the `resource id`, e.g.
```
terraform import azurerm_cdn_endpoint.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Cdn/profiles/myprofile1/endpoints/myendpoint1
-```
\ No newline at end of file
+```
diff --git a/website/docs/r/container_service.html.markdown b/website/docs/r/container_service.html.markdown
index a83121a09173..3bb626b8e33f 100644
--- a/website/docs/r/container_service.html.markdown
+++ b/website/docs/r/container_service.html.markdown
@@ -187,7 +187,7 @@ The following arguments are supported:
`ssh_key` supports the following:
-* `key_data` - (Required) The Public SSH Key used to access the cluster. The certificate must be in PEM format with or without headers.
+* `key_data` - (Required) The Public SSH Key used to access the cluster.
`agent_pool_profile` supports the following:
diff --git a/website/docs/r/cosmosdb_account.html.markdown b/website/docs/r/cosmosdb_account.html.markdown
new file mode 100644
index 000000000000..237736a1f216
--- /dev/null
+++ b/website/docs/r/cosmosdb_account.html.markdown
@@ -0,0 +1,100 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_cosmosdb_account"
+sidebar_current: "docs-azurerm-resource-cosmosdb-account"
+description: |-
+ Creates a new CosmosDB (formerly DocumentDB) Account.
+---
+
+# azurerm\_cosmosdb\_account
+
+Creates a new CosmosDB (formerly DocumentDB) Account.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "test" {
+ name = "resourceGroup1"
+ location = "West Europe"
+}
+
+resource "azurerm_cosmosdb_account" "test" {
+ name = "cosmosDBAccount1"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ offer_type = "Standard"
+ consistency_policy {
+ consistency_level = "BoundedStaleness"
+ }
+
+ failover_policy {
+ location = "West Europe"
+ priority = 0
+ }
+
+ failover_policy {
+ location = "East US"
+ priority = 1
+ }
+
+ tags {
+ hello = "world"
+ }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name of the CosmosDB Account. Changing this forces a new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which the CosmosDB Account is created. Changing this forces a new resource to be created.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+
+* `offer_type` - (Required) Specifies the Offer Type to use for this DocumentDB Account - currently this can only be set to `Standard`.
+
+* `consistency_policy` - (Required) Specifies a `consistency_policy` resource, used to define the consistency policy for this DocumentDB account.
+
+* `failover_policy` - (Required) Specifies a `failover_policy` resource, used to define where data should be replicated.
+
+* `ip_range_filter` - (Optional) DocumentDB Firewall Support: This value specifies the set of IP addresses or IP address ranges in CIDR form to be included as the allowed list of client IPs for a given database account. IP addresses/ranges must be comma separated and must not contain any spaces.
+
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
+`consistency_policy` supports the following:
+
+* `consistency_level` - (Required) The Consistency Level to use for this CosmosDB Account - can be either `BoundedStaleness`, `Eventual`, `Session` or `Strong`.
+* `max_interval_in_seconds` - (Optional) When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated. Accepted range for this value is 1 - 100. Defaults to `5`. Required when `consistency_level` is set to `BoundedStaleness`.
+* `max_staleness` - (Optional) When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated. Accepted range for this value is 1 – 2,147,483,647. Defaults to `100`. Required when `consistency_level` is set to `BoundedStaleness`.
+
+~> **Note**: `max_interval_in_seconds` and `max_staleness` can only be set to custom values when `consistency_level` is set to `BoundedStaleness` - otherwise they will return the default values shown above.
+
+`failover_policy` supports the following:
+
+* `location` - (Required) The name of the Azure region to host replicated data.
+* `priority` - (Required) The failover priority of the region. A failover priority of 0 indicates a write region. The maximum value for a failover priority = (total number of regions - 1). Failover priority values must be unique for each of the regions in which the database account exists.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The CosmosDB Account ID.
+
+* `primary_master_key` - The Primary master key for the CosmosDB Account.
+
+* `secondary_master_key` - The Secondary master key for the CosmosDB Account.
+
+* `primary_readonly_master_key` - The Primary read-only master Key for the CosmosDB Account.
+
+* `secondary_readonly_master_key` - The Secondary read-only master key for the CosmosDB Account.
+
+
+## Import
+
+CosmosDB Accounts can be imported using the `resource id`, e.g.
+
+```
+terraform import azurerm_cosmosdb_account.account1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DocumentDB/databaseAccounts/account1
+```
diff --git a/website/docs/r/dns_mx_record.html.markdown b/website/docs/r/dns_mx_record.html.markdown
index 197759215236..af183e6602ff 100644
--- a/website/docs/r/dns_mx_record.html.markdown
+++ b/website/docs/r/dns_mx_record.html.markdown
@@ -54,7 +54,7 @@ The following arguments are supported:
* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-* `TTL` - (Required) The Time To Live (TTL) of the DNS record.
+* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
* `record` - (Required) A list of values that make up the SRV record. Each `record` block supports fields documented below.
diff --git a/website/docs/r/dns_ns_record.html.markdown b/website/docs/r/dns_ns_record.html.markdown
index 99d6fd946533..057d2533c9be 100644
--- a/website/docs/r/dns_ns_record.html.markdown
+++ b/website/docs/r/dns_ns_record.html.markdown
@@ -52,7 +52,7 @@ The following arguments are supported:
* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-* `TTL` - (Required) The Time To Live (TTL) of the DNS record.
+* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
* `record` - (Required) A list of values that make up the NS record. Each `record` block supports fields documented below.
diff --git a/website/docs/r/dns_ptr_record.html.markdown b/website/docs/r/dns_ptr_record.html.markdown
new file mode 100644
index 000000000000..4f689eb32533
--- /dev/null
+++ b/website/docs/r/dns_ptr_record.html.markdown
@@ -0,0 +1,65 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_dns_ptr_record"
+sidebar_current: "docs-azurerm-resource-dns-ptr-record"
+description: |-
+ Create a DNS PTR Record.
+---
+
+# azurerm\_dns\_ptr\_record
+
+Enables you to manage DNS PTR Records within Azure DNS.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "test" {
+ name = "acceptanceTestResourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_dns_zone" "test" {
+ name = "mydomain.com"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+}
+
+resource "azurerm_dns_ptr_record" "test" {
+ name = "test"
+ zone_name = "${azurerm_dns_zone.test.name}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ ttl = "300"
+ records = ["yourdomain.com"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the DNS PTR Record.
+
+* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
+
+* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
+
+* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
+
+* `records` - (Required) List of Fully Qualified Domain Names.
+
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The DNS PTR Record ID.
+
+* `etag` - The etag of the record set.
+
+## Import
+
+PTR records can be imported using the `resource id`, e.g.
+
+```
+terraform import azurerm_dns_ptr_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/PTR/myrecord1
+```
diff --git a/website/docs/r/dns_srv_record.html.markdown b/website/docs/r/dns_srv_record.html.markdown
index 34b5719b2d06..d8534f17439d 100644
--- a/website/docs/r/dns_srv_record.html.markdown
+++ b/website/docs/r/dns_srv_record.html.markdown
@@ -51,7 +51,7 @@ The following arguments are supported:
* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-* `TTL` - (Required) The Time To Live (TTL) of the DNS record.
+* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
* `record` - (Required) A list of values that make up the SRV record. Each `record` block supports fields documented below.
diff --git a/website/docs/r/dns_txt_record.html.markdown b/website/docs/r/dns_txt_record.html.markdown
index 14299be53d4d..b96df41b9407 100644
--- a/website/docs/r/dns_txt_record.html.markdown
+++ b/website/docs/r/dns_txt_record.html.markdown
@@ -52,7 +52,7 @@ The following arguments are supported:
* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-* `TTL` - (Required) The Time To Live (TTL) of the DNS record.
+* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
* `record` - (Required) A list of values that make up the txt record. Each `record` block supports fields documented below.
diff --git a/website/docs/r/express_route_circuit.html.markdown b/website/docs/r/express_route_circuit.html.markdown
index fb9a159c2a25..15cad7353595 100644
--- a/website/docs/r/express_route_circuit.html.markdown
+++ b/website/docs/r/express_route_circuit.html.markdown
@@ -1,7 +1,7 @@
---
layout: "azurerm"
page_title: "Azure Resource Manager: azurerm_express_route_circuit"
-sidebar_current: "docs-azurerm-resource-express-route-circuit"
+sidebar_current: "docs-azurerm-resource-network-express-route-circuit"
description: |-
Creates an ExpressRoute circuit.
---
@@ -54,7 +54,7 @@ The following arguments are supported:
* `peering_location` - (Required) The name of the peering location and not the ARM resource location.
-* `bandwidth_in_mbps` - (Required) The bandwidth in Mbps of the circuit being created. Once you increase your bandwidth,
+* `bandwidth_in_mbps` - (Required) The bandwidth in Mbps of the circuit being created. Once you increase your bandwidth,
you will not be able to decrease it to its previous value.
* `sku` - (Required) Chosen SKU of ExpressRoute circuit as documented below.
@@ -68,7 +68,7 @@ The following arguments are supported:
* `tier` - (Required) The service tier. Value must be either "Premium" or "Standard".
-* `family` - (Required) The billing mode. Value must be either "MeteredData" or "UnlimitedData".
+* `family` - (Required) The billing mode. Value must be either "MeteredData" or "UnlimitedData".
Once you set the billing model to "UnlimitedData", you will not be able to switch to "MeteredData".
## Attributes Reference
@@ -76,7 +76,7 @@ The following arguments are supported:
The following attributes are exported:
* `id` - The Resource ID of the ExpressRoute circuit.
-* `service_provider_provisioning_state` - The ExpressRoute circuit provisioning state from your chosen service provider.
+* `service_provider_provisioning_state` - The ExpressRoute circuit provisioning state from your chosen service provider.
Possible values are "NotProvisioned", "Provisioning", "Provisioned", and "Deprovisioning".
* `service_key` - The string needed by the service provider to provision the ExpressRoute circuit.
diff --git a/website/docs/r/local_network_gateway.html.markdown b/website/docs/r/local_network_gateway.html.markdown
index b70e99741603..9baffc643189 100644
--- a/website/docs/r/local_network_gateway.html.markdown
+++ b/website/docs/r/local_network_gateway.html.markdown
@@ -1,7 +1,7 @@
---
layout: "azurerm"
page_title: "Azure Resource Manager: azurerm_local_network_gateway"
-sidebar_current: "docs-azurerm-resource-local-network-gateway"
+sidebar_current: "docs-azurerm-resource-network-local-network-gateway"
description: |-
Creates a new local network gateway connection over which specific connections can be configured.
---
diff --git a/website/docs/r/network_security_group.html.markdown b/website/docs/r/network_security_group.html.markdown
index 1e49cfc0cbd8..e00cba2e4f05 100644
--- a/website/docs/r/network_security_group.html.markdown
+++ b/website/docs/r/network_security_group.html.markdown
@@ -10,6 +10,10 @@ description: |-
Create a network security group that contains a list of network security rules.
+~> **NOTE on Network Security Groups and Network Security Rules:** Terraform currently
+provides both a standalone [Network Security Rule resource](network_security_rule.html), and allows for Network Security Rules to be defined in-line within the [Network Security Group resource](network_security_group.html).
+At this time you cannot use a Network Security Group with in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules.
+
## Example Usage
```hcl
diff --git a/website/docs/r/network_security_rule.html.markdown b/website/docs/r/network_security_rule.html.markdown
index c30506706ba7..ac65de7998e4 100644
--- a/website/docs/r/network_security_rule.html.markdown
+++ b/website/docs/r/network_security_rule.html.markdown
@@ -10,6 +10,10 @@ description: |-
Create a Network Security Rule.
+~> **NOTE on Network Security Groups and Network Security Rules:** Terraform currently
+provides both a standalone [Network Security Rule resource](network_security_rule.html), and allows for Network Security Rules to be defined in-line within the [Network Security Group resource](network_security_group.html).
+At this time you cannot use a Network Security Group with in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules.
+
## Example Usage
```hcl
@@ -43,7 +47,7 @@ resource "azurerm_network_security_rule" "test" {
The following arguments are supported:
-* `name` - (Required) The name of the security rule.
+* `name` - (Required) The name of the security rule. This needs to be unique across all Rules in the Network Security Group. Changing this forces a new resource to be created.
* `resource_group_name` - (Required) The name of the resource group in which to
create the Network Security Rule.
@@ -81,4 +85,4 @@ Network Security Rules can be imported using the `resource id`, e.g.
```
terraform import azurerm_network_security_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup/securityRules/rule1
-```
\ No newline at end of file
+```
diff --git a/website/docs/r/redis_cache.html.markdown b/website/docs/r/redis_cache.html.markdown
index ddb904ddc719..f45d65c5b5e2 100644
--- a/website/docs/r/redis_cache.html.markdown
+++ b/website/docs/r/redis_cache.html.markdown
@@ -28,7 +28,7 @@ resource "azurerm_redis_cache" "test" {
enable_non_ssl_port = false
redis_configuration {
- maxclients = "256"
+ maxclients = 256
}
}
```
@@ -51,7 +51,7 @@ resource "azurerm_redis_cache" "test" {
enable_non_ssl_port = false
redis_configuration {
- maxclients = "1000"
+ maxclients = 1000
}
}
```
@@ -75,14 +75,45 @@ resource "azurerm_redis_cache" "test" {
shard_count = 3
redis_configuration {
- maxclients = "7500"
- maxmemory_reserved = "2"
- maxmemory_delta = "2"
+ maxclients = 7500
+ maxmemory_reserved = 2
+ maxmemory_delta = 2
maxmemory_policy = "allkeys-lru"
}
}
```
+## Example Usage (Premium with Backup)
+
+```hcl
+resource "azurerm_resource_group" "test" {
+ name = "redisrg"
+ location = "West US"
+}
+resource "azurerm_storage_account" "test" {
+ name = "redissa"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ location = "${azurerm_resource_group.test.location}"
+ account_type = "Standard_GRS"
+}
+resource "azurerm_redis_cache" "test" {
+ name = "example-redis"
+ location = "${azurerm_resource_group.test.location}"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ capacity = 3
+ family = "P"
+ sku_name = "Premium"
+ enable_non_ssl_port = false
+ redis_configuration {
+ maxclients = 256
+ rdb_backup_enabled = true
+ rdb_backup_frequency = 60
+ rdb_backup_max_snapshot_count = 1
+ rdb_storage_connection_string = "DefaultEndpointsProtocol=https;BlobEndpoint=${azurerm_storage_account.test.primary_blob_endpoint};AccountName=${azurerm_storage_account.test.name};AccountKey=${azurerm_storage_account.test.primary_access_key}"
+ }
+}
+```
+
## Argument Reference
The following arguments are supported:
@@ -107,13 +138,27 @@ The pricing group for the Redis Family - either "C" or "P" at present.
* `shard_count` - (Optional) *Only available when using the Premium SKU* The number of Shards to create on the Redis Cluster.
-* `redis_configuration` - (Required) Potential Redis configuration values - with some limitations by SKU - defaults/details are shown below.
+* `redis_configuration` - (Required) A `redis_configuration` as defined below - with some limitations by SKU - defaults/details are shown below.
+
+---
+
+`redis_configuration` supports the following:
+
+* `maxclients` - (Optional) Set the max number of connected clients at the same time. Defaults are shown below.
+* `maxmemory_reserved` - (Optional) Value in megabytes reserved for non-cache usage e.g. failover. Defaults are shown below.
+* `maxmemory_delta` - (Optional) The max-memory delta for this Redis instance. Defaults are shown below.
+* `maxmemory_policy` - (Optional) How Redis will select what to remove when `maxmemory` is reached. Defaults are shown below.
+
+* `rdb_backup_enabled` - (Optional) Is Backup Enabled? Only supported on Premium SKUs.
+* `rdb_backup_frequency` - (Optional) The Backup Frequency in Minutes. Only supported on Premium SKUs. Possible values are: `15`, `30`, `60`, `360`, `720` and `1440`.
+* `rdb_backup_max_snapshot_count` - (Optional) The maximum number of snapshots to create as a backup. Only supported for Premium SKUs.
+* `rdb_storage_connection_string` - (Optional) The Connection String to the Storage Account. Only supported for Premium SKUs. In the format: `DefaultEndpointsProtocol=https;BlobEndpoint=${azurerm_storage_account.test.primary_blob_endpoint};AccountName=${azurerm_storage_account.test.name};AccountKey=${azurerm_storage_account.test.primary_access_key}`.
```hcl
redis_configuration {
- maxclients = "512"
- maxmemory_reserve = "10"
- maxmemory_delta = "2"
+ maxclients = 512
+ maxmemory_reserve = 10
+ maxmemory_delta = 2
maxmemory_policy = "allkeys-lru"
}
```
diff --git a/website/docs/r/servicebus_queue.html.markdown b/website/docs/r/servicebus_queue.html.markdown
new file mode 100644
index 000000000000..0100235eac23
--- /dev/null
+++ b/website/docs/r/servicebus_queue.html.markdown
@@ -0,0 +1,114 @@
+---
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_servicebus_queue"
+sidebar_current: "docs-azurerm-resource-servicebus-queue"
+description: |-
+ Create a ServiceBus Queue.
+---
+
+# azurerm\_servicebus\_queue
+
+Create and manage a ServiceBus Queue.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "test" {
+ name = "resourceGroup1"
+ location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "acceptanceTestServiceBusNamespace"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ sku = "standard"
+
+ tags {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_servicebus_queue" "test" {
+ name = "testQueue"
+ location = "West US"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ namespace_name = "${azurerm_servicebus_namespace.test.name}"
+
+ enable_partitioning = true
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name of the ServiceBus Queue resource. Changing this forces a
+ new resource to be created.
+
+* `namespace_name` - (Required) The name of the ServiceBus Namespace to create
+ this queue in. Changing this forces a new resource to be created.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists.
+ Changing this forces a new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+ create the namespace. Changing this forces a new resource to be created.
+
+* `auto_delete_on_idle` - (Optional) The idle interval after which the
+ Queue is automatically deleted, minimum of 5 minutes. Provided in the [TimeSpan](#timespan-format)
+ format.
+
+* `default_message_ttl` - (Optional) The TTL of messages sent to this queue. This is the default value
+ used when TTL is not set on the message itself. Provided in the [TimeSpan](#timespan-format)
+ format.
+
+* `duplicate_detection_history_time_window` - (Optional) The duration during which
+ duplicates can be detected. Default value is 10 minutes. Provided in the [TimeSpan](#timespan-format) format.
+
+* `enable_batched_operations` - (Optional) Boolean flag which controls if server-side
+ batched operations are enabled. Defaults to `false`.
+
+* `enable_express` - (Optional) Boolean flag which controls whether Express Entities
+ are enabled. An express queue holds a message in memory temporarily before writing
+ it to persistent storage. Defaults to `false` for Basic and Standard. For Premium, it MUST
+ be set to `false`.
+
+~> **NOTE:** Service Bus Premium namespaces do not support Express Entities, so `enable_express` MUST be set to `false`.
+
+* `enable_partitioning` - (Optional) Boolean flag which controls whether to enable
+ the queue to be partitioned across multiple message brokers. Changing this forces
+ a new resource to be created. Defaults to `false` for Basic and Standard. For Premium, it MUST
+ be set to `true`.
+
+~> **NOTE:** Service Bus Premium namespaces are always partitioned, so `enable_partitioning` MUST be set to `true`.
+
+* `max_size_in_megabytes` - (Optional) Integer value which controls the size of
+ memory allocated for the queue. For supported values see the "Queue/topic size"
+ section of [this document](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas).
+
+* `requires_duplicate_detection` - (Optional) Boolean flag which controls whether
+ the Queue requires duplicate detection. Changing this forces
+ a new resource to be created. Defaults to `false`.
+
+* `support_ordering` - (Optional) Boolean flag which controls whether the Queue
+ supports ordering. Defaults to `false`.
+
+### TimeSpan Format
+
+Some arguments for this resource are required in the TimeSpan format which is
+used to represent a length of time. The supported format is documented [here](https://msdn.microsoft.com/en-us/library/se73z7b9(v=vs.110).aspx#Anchor_2)
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ServiceBus Queue ID.
+
+## Import
+
+Service Bus Queue can be imported using the `resource id`, e.g.
+
+```
+terraform import azurerm_servicebus_queue.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.servicebus/namespaces/sbns1/queues/snqueue1
+```
diff --git a/website/docs/r/servicebus_topic.html.markdown b/website/docs/r/servicebus_topic.html.markdown
index 040518788e38..7394e7337c0b 100644
--- a/website/docs/r/servicebus_topic.html.markdown
+++ b/website/docs/r/servicebus_topic.html.markdown
@@ -10,8 +10,7 @@ description: |-
Create a ServiceBus Topic.
-**Note** Topics can only be created in Namespaces with an SKU or `standard` or
-higher.
+**Note** Topics can only be created in Namespaces with an SKU of `standard` or higher.
## Example Usage
@@ -58,6 +57,8 @@ The following arguments are supported:
* `resource_group_name` - (Required) The name of the resource group in which to
create the namespace. Changing this forces a new resource to be created.
+* `status` - (Optional) The Status of the Service Bus Topic. Acceptable values are `Active` or `Disabled`. Defaults to `Active`.
+
* `auto_delete_on_idle` - (Optional) The idle interval after which the
Topic is automatically deleted, minimum of 5 minutes. Provided in the [TimeSpan](#timespan-format)
format.
diff --git a/website/docs/r/storage_account.html.markdown b/website/docs/r/storage_account.html.markdown
index 9b4e13064dd4..e04a7ef2bc76 100644
--- a/website/docs/r/storage_account.html.markdown
+++ b/website/docs/r/storage_account.html.markdown
@@ -84,6 +84,8 @@ The following attributes are exported in addition to the arguments listed above:
* `primary_file_endpoint` - The endpoint URL for file storage in the primary location.
* `primary_access_key` - The primary access key for the storage account
* `secondary_access_key` - The secondary access key for the storage account
+* `primary_blob_connection_string` - The connection string associated with the primary blob location
+* `secondary_blob_connection_string` - The connection string associated with the secondary blob location
## Import
@@ -92,4 +94,3 @@ Storage Accounts can be imported using the `resource id`, e.g.
```
terraform import azurerm_storage_account.storageAcc1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Storage/storageAccounts/myaccount
```
-
diff --git a/website/docs/r/subnet.html.markdown b/website/docs/r/subnet.html.markdown
index 34f4d97ea063..72b8a87f520d 100644
--- a/website/docs/r/subnet.html.markdown
+++ b/website/docs/r/subnet.html.markdown
@@ -10,6 +10,10 @@ description: |-
Creates a new subnet. Subnets represent network segments within the IP space defined by the virtual network.
+~> **NOTE on Virtual Networks and Subnets:** Terraform currently
+provides both a standalone [Subnet resource](subnet.html), and allows for Subnets to be defined in-line within the [Virtual Network resource](virtual_network.html).
+At this time you cannot use a Virtual Network with in-line Subnets in conjunction with any Subnet resources. Doing so will cause a conflict of Subnet configurations and will overwrite Subnets.
+
## Example Usage
```hcl
diff --git a/website/docs/r/template_deployment.html.markdown b/website/docs/r/template_deployment.html.markdown
index ffb6386958fd..ed1efec09dbb 100644
--- a/website/docs/r/template_deployment.html.markdown
+++ b/website/docs/r/template_deployment.html.markdown
@@ -10,6 +10,10 @@ description: |-
Create a template deployment of resources
+~> **Note on ARM Template Deployments:** Due to the way the underlying Azure API is designed, Terraform can only manage the deployment of the ARM Template - and not any resources which are created by it.
+This means that when deleting the `azurerm_template_deployment` resource, Terraform will only remove the reference to the deployment, whilst leaving any resources created by that ARM Template Deployment.
+One workaround for this is to use a unique Resource Group for each ARM Template Deployment, which means deleting the Resource Group would also delete any resources created within it - however this isn't ideal. [More information](https://docs.microsoft.com/en-us/rest/api/resources/deployments#Deployments_Delete).
+
## Example Usage
```hcl
diff --git a/website/docs/r/virtual_machine.html.markdown b/website/docs/r/virtual_machine.html.markdown
index db1d72683feb..ce3f5c088309 100644
--- a/website/docs/r/virtual_machine.html.markdown
+++ b/website/docs/r/virtual_machine.html.markdown
@@ -10,18 +10,18 @@ description: |-
Create a virtual machine.
-## Example Usage (Unmanaged Disks)
+## Example Usage with Managed Disks (Recommended)
```hcl
resource "azurerm_resource_group" "test" {
name = "acctestrg"
- location = "West US"
+ location = "West US 2"
}
resource "azurerm_virtual_network" "test" {
name = "acctvn"
address_space = ["10.0.0.0/16"]
- location = "West US"
+ location = "West US 2"
resource_group_name = "${azurerm_resource_group.test.name}"
}
@@ -34,7 +34,7 @@ resource "azurerm_subnet" "test" {
resource "azurerm_network_interface" "test" {
name = "acctni"
- location = "West US"
+ location = "West US 2"
resource_group_name = "${azurerm_resource_group.test.name}"
ip_configuration {
@@ -44,30 +44,27 @@ resource "azurerm_network_interface" "test" {
}
}
-resource "azurerm_storage_account" "test" {
- name = "accsa"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_storage_container" "test" {
- name = "vhds"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
- container_access_type = "private"
+resource "azurerm_managed_disk" "test" {
+ name = "datadisk_existing"
+ location = "West US 2"
+ resource_group_name = "${azurerm_resource_group.test.name}"
+ storage_account_type = "Standard_LRS"
+ create_option = "Empty"
+ disk_size_gb = "1023"
}
resource "azurerm_virtual_machine" "test" {
name = "acctvm"
- location = "West US"
+ location = "West US 2"
resource_group_name = "${azurerm_resource_group.test.name}"
network_interface_ids = ["${azurerm_network_interface.test.id}"]
- vm_size = "Standard_A0"
+ vm_size = "Standard_DS1_v2"
+
+ # Uncomment this line to delete the OS disk automatically when deleting the VM
+ # delete_os_disk_on_termination = true
+
+ # Uncomment this line to delete the data disks automatically when deleting the VM
+ # delete_data_disks_on_termination = true
storage_image_reference {
publisher = "Canonical"
@@ -77,10 +74,27 @@ resource "azurerm_virtual_machine" "test" {
}
storage_os_disk {
- name = "myosdisk1"
- vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd"
- caching = "ReadWrite"
- create_option = "FromImage"
+ name = "myosdisk1"
+ caching = "ReadWrite"
+ create_option = "FromImage"
+ managed_disk_type = "Standard_LRS"
+ }
+
+ # Optional data disks
+ storage_data_disk {
+ name = "datadisk_new"
+ managed_disk_type = "Standard_LRS"
+ create_option = "Empty"
+ lun = 0
+ disk_size_gb = "1023"
+ }
+
+ storage_data_disk {
+ name = "${azurerm_managed_disk.test.name}"
+ managed_disk_id = "${azurerm_managed_disk.test.id}"
+ create_option = "Attach"
+ lun = 1
+ disk_size_gb = "${azurerm_managed_disk.test.disk_size_gb}"
}
os_profile {
@@ -99,7 +113,7 @@ resource "azurerm_virtual_machine" "test" {
}
```
-## Example Usage With Additional Empty Data Disk (Unmanaged Disks)
+## Example Usage with Unmanaged Disks
```hcl
resource "azurerm_resource_group" "test" {
@@ -158,6 +172,12 @@ resource "azurerm_virtual_machine" "test" {
network_interface_ids = ["${azurerm_network_interface.test.id}"]
vm_size = "Standard_A0"
+ # Uncomment this line to delete the OS disk automatically when deleting the VM
+ # delete_os_disk_on_termination = true
+
+ # Uncomment this line to delete the data disks automatically when deleting the VM
+ # delete_data_disks_on_termination = true
+
storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
@@ -172,6 +192,7 @@ resource "azurerm_virtual_machine" "test" {
create_option = "FromImage"
}
+ # Optional data disks
storage_data_disk {
name = "datadisk0"
vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/datadisk0.vhd"
@@ -196,102 +217,6 @@ resource "azurerm_virtual_machine" "test" {
}
```
-## Example Usage (Managed Disks)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "West US 2"
-}
-
-resource "azurerm_virtual_network" "test" {
- name = "acctvn"
- address_space = ["10.0.0.0/16"]
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_subnet" "test" {
- name = "acctsub"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test.name}"
- address_prefix = "10.0.2.0/24"
-}
-
-resource "azurerm_network_interface" "test" {
- name = "acctni"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- ip_configuration {
- name = "testconfiguration1"
- subnet_id = "${azurerm_subnet.test.id}"
- private_ip_address_allocation = "dynamic"
- }
-}
-
-resource "azurerm_managed_disk" "test" {
- name = "datadisk_existing"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_type = "Standard_LRS"
- create_option = "Empty"
- disk_size_gb = "1023"
-}
-
-resource "azurerm_virtual_machine" "test" {
- name = "acctvm"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- network_interface_ids = ["${azurerm_network_interface.test.id}"]
- vm_size = "Standard_DS1_v2"
-
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "14.04.2-LTS"
- version = "latest"
- }
-
- storage_os_disk {
- name = "myosdisk1"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "Standard_LRS"
- }
-
- storage_data_disk {
- name = "datadisk_new"
- managed_disk_type = "Standard_LRS"
- create_option = "Empty"
- lun = 0
- disk_size_gb = "1023"
- }
-
- storage_data_disk {
- name = "${azurerm_managed_disk.test.name}"
- managed_disk_id = "${azurerm_managed_disk.test.id}"
- create_option = "Attach"
- lun = 1
- disk_size_gb = "${azurerm_managed_disk.test.disk_size_gb}"
- }
-
- os_profile {
- computer_name = "hostname"
- admin_username = "testadmin"
- admin_password = "Password1234!"
- }
-
- os_profile_linux_config {
- disable_password_authentication = false
- }
-
- tags {
- environment = "staging"
- }
-}
-```
-
## Argument Reference
The following arguments are supported:
@@ -367,7 +292,7 @@ For more information on the different example configurations, please check out t
* `computer_name` - (Required) Specifies the name of the virtual machine.
* `admin_username` - (Required) Specifies the name of the administrator account.
-* `admin_password` - (Required) Specifies the password of the administrator account.
+* `admin_password` - (Required for Windows, Optional for Linux) Specifies the password of the administrator account.
* `custom_data` - (Optional) Specifies custom data to supply to the machine. On linux-based systems, this can be used as a cloud-init script. On other systems, this will be copied as a file on disk. Internally, Terraform will base64 encode this value before sending it to the API. The maximum length of the binary array is 65535 bytes.
~> **NOTE:** `admin_password` must be between 6-72 characters long and must satisfy at least 3 of password complexity requirements from the following:
@@ -397,7 +322,7 @@ For more information on the different example configurations, please check out t
`os_profile_linux_config` supports the following:
-* `disable_password_authentication` - (Required) Specifies whether password authentication should be disabled.
+* `disable_password_authentication` - (Required) Specifies whether password authentication should be disabled. If set to `false`, an `admin_password` must be specified.
* `ssh_keys` - (Optional) Specifies a collection of `path` and `key_data` to be placed on the virtual machine.
~> **Note:** Please note that the only allowed `path` is `/home//.ssh/authorized_keys` due to a limitation of Azure.
@@ -412,10 +337,10 @@ For more information on the different example configurations, please check out t
* `certificate_url` - (Required) Specifies the URI of the key vault secrets in the format of `https:///secrets//`. Stored secret is the Base64 encoding of a JSON Object that which is encoded in UTF-8 of which the contents need to be
```json
-{
- "data":"",
+{
+ "data":"",
"dataType":"pfx",
- "password":""
+ "password":""
}
```
diff --git a/website/docs/r/virtual_machine_scale_set.html.markdown b/website/docs/r/virtual_machine_scale_set.html.markdown
index d13404fc7db8..951376d8f375 100644
--- a/website/docs/r/virtual_machine_scale_set.html.markdown
+++ b/website/docs/r/virtual_machine_scale_set.html.markdown
@@ -325,7 +325,7 @@ The following arguments are supported:
`storage_profile_os_disk` supports the following:
-* `name` - (Required) Specifies the disk name. Value must be blank (`""`) when `managed_disk_type` is specified.
+* `name` - (Optional) Specifies the disk name. Must be specified when using unmanaged disks (the `managed_disk_type` property is not set).
* `vhd_containers` - (Optional) Specifies the vhd uri. Cannot be used when `image` or `managed_disk_type` is specified.
* `managed_disk_type` - (Optional) Specifies the type of managed disk to create. Value you must be either `Standard_LRS` or `Premium_LRS`. Cannot be used when `vhd_containers` or `image` is specified.
* `create_option` - (Required) Specifies how the virtual machine should be created. The only possible option is `FromImage`.
diff --git a/website/docs/r/virtual_network.html.markdown b/website/docs/r/virtual_network.html.markdown
index 69796007228d..8e3f032442a7 100644
--- a/website/docs/r/virtual_network.html.markdown
+++ b/website/docs/r/virtual_network.html.markdown
@@ -11,6 +11,10 @@ description: |-
Creates a new virtual network including any configured subnets. Each subnet can
optionally be configured with a security group to be associated with the subnet.
+~> **NOTE on Virtual Networks and Subnets:** Terraform currently
+provides both a standalone [Subnet resource](subnet.html), and allows for Subnets to be defined in-line within the [Virtual Network resource](virtual_network.html).
+At this time you cannot use a Virtual Network with in-line Subnets in conjunction with any Subnet resources. Doing so will cause a conflict of Subnet configurations and will overwrite Subnets.
+
## Example Usage
```hcl