From 8f6e1c82524cefd482675e004e21ed5d678c1723 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Mon, 18 May 2020 19:22:24 -0700 Subject: [PATCH 01/48] Enable monitor on create --- azurerm/helpers/azure/hdinsight.go | 40 +++++++++ .../services/hdinsight/client/client.go | 5 ++ .../hdinsight_hadoop_cluster_resource.go | 32 +++++++ .../hdinsight_hadoop_cluster_resource_test.go | 90 +++++++++++++++++++ 4 files changed, 167 insertions(+) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index ca129004101e..dff800e1b785 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -156,6 +156,34 @@ func SchemaHDInsightsExternalMetastore() *schema.Schema { } } +func SchemaHDInsightsMonitor() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_analytics_workspace_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "primary_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + // Azure doesn't return the key + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return (new == d.Get(k).(string)) && (old == "*****") + }, + }, + }, + }, + } +} + func ExpandHDInsightsConfigurations(input []interface{}) map[string]interface{} { vs := input[0].(map[string]interface{}) @@ -253,6 +281,18 @@ func ExpandHDInsightsAmbariMetastore(input []interface{}) map[string]interface{} } } +func ExpandHDInsightsMonitor(input []interface{}) hdinsight.ClusterMonitoringRequest { + vs := input[0].(map[string]interface{}) + + workspace := vs["log_analytics_workspace_id"].(string) + key := vs["primary_key"].(string) + + return hdinsight.ClusterMonitoringRequest{ + WorkspaceID: utils.String(workspace), + PrimaryKey: utils.String(key), + } +} + func FlattenHDInsightsConfigurations(input map[string]*string) []interface{} { enabled := false if v, exists := input["restAuthCredential.isEnabled"]; exists && v != nil { diff --git a/azurerm/internal/services/hdinsight/client/client.go b/azurerm/internal/services/hdinsight/client/client.go index 9f96cfdc1559..afb902779c0c 100644 --- a/azurerm/internal/services/hdinsight/client/client.go +++ b/azurerm/internal/services/hdinsight/client/client.go @@ -9,6 +9,7 @@ type Client struct { ApplicationsClient *hdinsight.ApplicationsClient ClustersClient *hdinsight.ClustersClient ConfigurationsClient *hdinsight.ConfigurationsClient + ExtensionsClient *hdinsight.ExtensionsClient } func NewClient(o *common.ClientOptions) *Client { @@ -21,9 +22,13 @@ func NewClient(o *common.ClientOptions) *Client { ConfigurationsClient := hdinsight.NewConfigurationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ConfigurationsClient.Client, o.ResourceManagerAuthorizer) + ExtensionsClient := hdinsight.NewExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ExtensionsClient.Client, o.ResourceManagerAuthorizer) + return &Client{ ApplicationsClient: &ApplicationsClient, ClustersClient: &ClustersClient, ConfigurationsClient: &ConfigurationsClient, + ExtensionsClient: &ExtensionsClient, } } diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index b31f87c7813a..9994d092af53 100644 --- 
a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
@@ -180,6 +180,8 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+
+			"monitor": azure.SchemaHDInsightsMonitor(),
 		},
 	}
 }
@@ -309,6 +311,21 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf
 		}
 	}
 
+	// We can only enable monitoring after creation
+	if v, ok := d.GetOk("monitor"); ok {
+		monitorRaw := v.([]interface{})
+		monitor := azure.ExpandHDInsightsMonitor(monitorRaw)
+		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+		future, err := extensionsClient.EnableMonitoring(ctx, resourceGroup, name, monitor)
+		if err != nil {
+			return err
+		}
+
+		if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+			return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+	}
+
 	return resourceArmHDInsightHadoopClusterRead(d, meta)
 }
 
@@ -400,6 +417,21 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac
 		d.Set("https_endpoint", httpEndpoint)
 		sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints)
 		d.Set("ssh_endpoint", sshEndpoint)
+
+		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+
+		monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name)
+		if err != nil {
+			return fmt.Errorf("Error reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+
+		if *monitor.ClusterMonitoringEnabled {
+			d.Set("monitor", []interface{}{
+				map[string]string{
+					"log_analytics_workspace_id": *monitor.WorkspaceID,
+					"primary_key":                "****",
+				}})
+		}
 	}
 
 	return tags.FlattenAndSet(d, resp.Tags)
diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go
index 05780c4b5fbc..ae6ef8b77b78 100644
--- a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go
+++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go
@@ -435,6 +435,32 @@ func TestAccAzureRMHDInsightHadoopCluster_updateMetastore(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightHadoopCluster_monitor(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightHadoopCluster_monitor(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account"),
+		},
+	})
+}
+
 func testAccAzureRMHDInsightHadoopCluster_basic(data acceptance.TestData) string {
 	template := testAccAzureRMHDInsightHadoopCluster_template(data)
 	return fmt.Sprintf(`
@@ -1316,3 +1342,67 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" {
 }
 `, template, data.RandomInteger, data.RandomInteger)
 }
+
+func testAccAzureRMHDInsightHadoopCluster_monitor(data acceptance.TestData) string {
+	template := testAccAzureRMHDInsightHadoopCluster_template(data)
+	return fmt.Sprintf(`
+%s
+
+resource "azurerm_log_analytics_workspace" "test" {
+  name                = "acctestLAW-%d"
+  location            = azurerm_resource_group.test.location
+  resource_group_name = azurerm_resource_group.test.name
+  sku                 = "PerGB2018"
+}
+
+resource "azurerm_hdinsight_hadoop_cluster" "test" {
+  name                = "acctesthdi-%d"
+  resource_group_name = azurerm_resource_group.test.name
+  location            = azurerm_resource_group.test.location
+  cluster_version     = "3.6"
+  tier                = "Standard"
+
+  component_version {
+    hadoop = "2.7"
+  }
+
+  gateway {
+    enabled  = true
+    username = "acctestusrgw"
+    password = "TerrAform123!"
+  }
+
+  storage_account {
+    storage_container_id = azurerm_storage_container.test.id
+    storage_account_key  = azurerm_storage_account.test.primary_access_key
+    is_default           = true
+  }
+
+  roles {
+    head_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+
+    worker_node {
+      vm_size               = "Standard_D4_V2"
+      username              = "acctestusrvm"
+      password              = "AccTestvdSC4daf986!"
+      target_instance_count = 2
+    }
+
+    zookeeper_node {
+      vm_size  = "Standard_D3_v2"
+      username = "acctestusrvm"
+      password = "AccTestvdSC4daf986!"
+    }
+  }
+
+  monitor {
+    log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id
+    primary_key                = azurerm_log_analytics_workspace.test.primary_shared_key
+  }
+}
+`, template, data.RandomInteger, data.RandomInteger)
+}

From 6497a825f32d91100b009155dc7dabaf7a844efb Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 11:48:16 -0700
Subject: [PATCH 02/48] Implement monitor changes

---
 azurerm/helpers/azure/hdinsight.go            |  4 +-
 .../services/hdinsight/common_hdinsight.go    | 43 ++++++++++
 .../hdinsight_hadoop_cluster_resource.go      | 13 +--
 .../hdinsight_hadoop_cluster_resource_test.go | 82 ++++++++++++++++++-
 4 files changed, 129 insertions(+), 13 deletions(-)

diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go
index dff800e1b785..58ca354ccfd9 100644
--- a/azurerm/helpers/azure/hdinsight.go
+++ b/azurerm/helpers/azure/hdinsight.go
@@ -166,12 +166,12 @@ func SchemaHDInsightsMonitor() *schema.Schema {
 				"log_analytics_workspace_id": {
 					Type:     schema.TypeString,
 					Required: true,
-					ForceNew: true,
+					ForceNew: false,
 				},
 				"primary_key": {
 					Type:     schema.TypeString,
 					Required: true,
-					ForceNew: true,
+					ForceNew: false,
 					Sensitive:    true,
 					ValidateFunc: validation.StringIsNotEmpty,
 					// Azure doesn't return the key
diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go
index 265c68e56dc0..5cecb814af09 100644
--- a/azurerm/internal/services/hdinsight/common_hdinsight.go
+++ b/azurerm/internal/services/hdinsight/common_hdinsight.go
@@ -106,6 +106,22 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema
 		}
 	}
 
+	if d.HasChange("monitor") {
+		log.Printf("[DEBUG] Change Azure Monitor for the HDInsight %q Cluster", clusterKind)
+		fmt.Println("Monitor change")
+		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+		if v, ok := d.GetOk("monitor"); ok {
+			monitorRaw := v.([]interface{})
+			if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
+				return err
+			}
+		} else {
+			if err := disableMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil {
+				return err
+			}
+		}
+	}
+
 	return readFunc(d, meta)
 	}
 }
@@ -325,3 +341,30 @@ func flattenHDInsightsMetastores(d *schema.ResourceData, configurations map[stri
 		})
 	}
 }
+
+func enableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error {
+	monitor := azure.ExpandHDInsightsMonitor(input)
+	future, err := client.EnableMonitoring(ctx, resourceGroup, name, monitor)
+	if err != nil {
+		return err
+	}
+
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+	}
+
+	return nil
+}
+
+func disableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string) error {
+	future, err := client.DisableMonitoring(ctx, resourceGroup, name)
+	if err != nil {
+		return err
+	}
+
+	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+		return fmt.Errorf("Error waiting for disabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+	}
+
+	return nil
+}
diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
index 9994d092af53..feed3b5fbff8 100644
--- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
@@ -192,6 +192,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf
 	defer cancel()
 
 	name := d.Get("name").(string)
+	fmt.Printf("Create cluster %q", name)
 	resourceGroup := d.Get("resource_group_name").(string)
 	location := azure.NormalizeLocation(d.Get("location").(string))
 	clusterVersion := d.Get("cluster_version").(string)
@@ -312,18 +313,12 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf
 	}
 
 	// We can only enable monitoring after creation
+	extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
 	if v, ok := d.GetOk("monitor"); ok {
 		monitorRaw := v.([]interface{})
-		monitor := azure.ExpandHDInsightsMonitor(monitorRaw)
-		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
-		future, err := extensionsClient.EnableMonitoring(ctx, resourceGroup, name, monitor)
-		if err != nil {
+		if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
 			return err
 		}
-
-		if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
-			return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
-		}
 	}
 
 	return resourceArmHDInsightHadoopClusterRead(d, meta)
@@ -429,7 +424,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac
 		d.Set("monitor", []interface{}{
 			map[string]string{
 				"log_analytics_workspace_id": *monitor.WorkspaceID,
-				"primary_key":                "****",
+				"primary_key":                "*****",
 			}})
 	}
 }
diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go
index ae6ef8b77b78..d6f2f576b551 100644
--- a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go
+++
b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go @@ -461,6 +461,84 @@ func TestAccAzureRMHDInsightHadoopCluster_monitor(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightHadoopCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightHadoopCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightHadoopCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightHadoopCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightHadoopCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightHadoopCluster_template(data) return fmt.Sprintf(` @@ -1349,7 +1427,7 @@ func testAccAzureRMHDInsightHadoopCluster_monitor(data acceptance.TestData) stri %s resource "azurerm_log_analytics_workspace" "test" { - name = "acctestLAW-%d" + name = "acctestLAW-%s-%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name sku = "PerGB2018" @@ 
-1404,5 +1482,5 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" {
     primary_key                = azurerm_log_analytics_workspace.test.primary_shared_key
   }
 }
-`, template, data.RandomInteger, data.RandomInteger)
+`, template, data.RandomString, data.RandomInteger, data.RandomInteger)
 }

From 20ca28b77ba4bd069cfc38a7b62fc15d4b345beb Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 11:53:13 -0700
Subject: [PATCH 03/48] Error message clean up

---
 azurerm/internal/services/hdinsight/common_hdinsight.go     | 5 ++---
 .../services/hdinsight/hdinsight_hadoop_cluster_resource.go | 1 -
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go
index 5cecb814af09..785b4a36fa97 100644
--- a/azurerm/internal/services/hdinsight/common_hdinsight.go
+++ b/azurerm/internal/services/hdinsight/common_hdinsight.go
@@ -108,7 +108,6 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema
 
 	if d.HasChange("monitor") {
 		log.Printf("[DEBUG] Change Azure Monitor for the HDInsight %q Cluster", clusterKind)
-		fmt.Println("Monitor change")
 		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
 		if v, ok := d.GetOk("monitor"); ok {
 			monitorRaw := v.([]interface{})
@@ -350,7 +349,7 @@ func enableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, r
 	}
 
 	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
-		return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		return fmt.Errorf("Error waiting for enabling monitor for HDInsight Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
 	}
 
 	return nil
@@ -363,7 +362,7 @@ func disableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient,
 	}
 
 	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
-		return fmt.Errorf("Error waiting for disabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		return fmt.Errorf("Error waiting for disabling monitor for HDInsight Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
 	}
 
 	return nil
diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
index feed3b5fbff8..1c5e00eed905 100644
--- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
@@ -192,7 +192,6 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf
 	defer cancel()
 
 	name := d.Get("name").(string)
-	fmt.Printf("Create cluster %q", name)
 	resourceGroup := d.Get("resource_group_name").(string)
 	location := azure.NormalizeLocation(d.Get("location").(string))
 	clusterVersion := d.Get("cluster_version").(string)

From af64be0a8e4243b50a6b93fff2257c6e46120260 Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 12:01:48 -0700
Subject: [PATCH 04/48] Documentation for hadoop

---
 website/docs/r/hdinsight_hadoop_cluster.html.markdown | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
index bccdc6d1b00b..bcf1634c2ab6 100644
--- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown
+++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown
@@ -113,6 +113,8 @@ The following arguments are supported:
 
 * `metastores` - (Optional) A `metastores` block as defined below.
 
+* `monitor` - (Optional) A `monitor` block as defined below.
+
 ---
 
 A `component_version` block supports the following:
@@ -300,6 +302,13 @@ An `ambari` block supports the following:
 
 * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
 
+---
+
+A `monitor` block supports the following:
+
+* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID.
+
+* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key.
 
 ## Attributes Reference

From 5566f35f4e96f21678f61c3e3962e18b128d3330 Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 13:24:44 -0700
Subject: [PATCH 05/48] A bit of refactoring

---
 .../services/hdinsight/common_hdinsight.go    | 20 +++++++++++++++----
 .../hdinsight_hadoop_cluster_resource.go      | 10 ++--------
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go
index 785b4a36fa97..2f1586cf2ed7 100644
--- a/azurerm/internal/services/hdinsight/common_hdinsight.go
+++ b/azurerm/internal/services/hdinsight/common_hdinsight.go
@@ -111,11 +111,11 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema
 		if v, ok := d.GetOk("monitor"); ok {
 			monitorRaw := v.([]interface{})
-			if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
+			if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
 				return err
 			}
 		} else {
-			if err := disableMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil {
+			if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil {
 				return err
 			}
 		}
@@ -341,7 +341,19 @@ func flattenHDInsightsMetastores(d *schema.ResourceData, configurations map[stri
 	}
 }
 
+func flattenHDInsightMonitoring(monitor hdinsight.ClusterMonitoringResponse) []interface{} {
+	if *monitor.ClusterMonitoringEnabled {
+		return []interface{}{
+			map[string]string{
+				"log_analytics_workspace_id": *monitor.WorkspaceID,
+				"primary_key":                "*****",
+			}}
+	}
+
+	return nil
+}
+
-func enableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error {
+func enableHDInsightMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error {
 	monitor := azure.ExpandHDInsightsMonitor(input)
 	future, err := client.EnableMonitoring(ctx, resourceGroup, name, monitor)
 	if err != nil {
@@ -355,7 +367,7 @@ func enableHDInsightMonitoring(ctx context.Context, client *hdinsight.Extensions
 	return nil
 }
 
-func disableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string) error {
+func disableHDInsightMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string) error {
 	future, err := client.DisableMonitoring(ctx, resourceGroup, name)
 	if err != nil {
 		return err
diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
index 1c5e00eed905..af22eb231c81 100644
--- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go
@@ -315,7 +315,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf
 	extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
 	if v, ok := d.GetOk("monitor"); ok {
 		monitorRaw := v.([]interface{})
-		if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
+		if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
 			return err
 		}
 	}
@@ -419,13 +419,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac
 		return fmt.Errorf("Error reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
 	}
 
-	if *monitor.ClusterMonitoringEnabled {
-		d.Set("monitor", []interface{}{
-			map[string]string{
-				"log_analytics_workspace_id": *monitor.WorkspaceID,
-				"primary_key":                "*****",
-			}})
-	}
+	d.Set("monitor", flattenHDInsightMonitoring(monitor))
 	}

From e45e69a7af7a97c191624391157195c32014f0ee Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 15:27:02 -0700
Subject: [PATCH 06/48] Azure Monitor for HBase cluster

---
 .../hdinsight_hbase_cluster_resource.go       |  20 +++
 .../hdinsight_hbase_cluster_resource_test.go  | 168 ++++++++++++++++++
 2 files changed, 188 insertions(+)

diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go
index 31996c321c33..b1293167e1f2 100644
--- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go
@@ -118,6 +118,8 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+
+			"monitor": azure.SchemaHDInsightsMonitor(),
 		},
 	}
 }
@@ -214,6 +216,15 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa
 
 	d.SetId(*read.ID)
 
+	// We can only enable monitoring after creation
+	extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+	if v, ok := d.GetOk("monitor"); ok {
+		monitorRaw := v.([]interface{})
+		if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
+			return err
+		}
+	}
+
 	return resourceArmHDInsightHBaseClusterRead(d, meta)
 }
 
@@ -283,6 +294,15 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface
 	d.Set("https_endpoint", httpEndpoint)
 	sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints)
 	d.Set("ssh_endpoint", sshEndpoint)
+
+		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+
+		monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name)
+		if err != nil {
+			return fmt.Errorf("Error reading monitor configuration for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+
+		d.Set("monitor", flattenHDInsightMonitoring(monitor))
 	}
 
 	return tags.FlattenAndSet(d, resp.Tags)
diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go
index ff1def15af2b..e37e0c9b6a98 100644
--- a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go
+++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go
@@ -226,6 +226,110 @@
func TestAccAzureRMHDInsightHBaseCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightHBaseCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightHBaseCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightHBaseCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightHBaseCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightHBaseCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightHBaseCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, 
"https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightHBaseCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightHBaseCluster_template(data) return fmt.Sprintf(` @@ -794,3 +898,67 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightHBaseCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightHBaseCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_hbase_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + hbase = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+    }
+  }
+
+  monitor {
+    log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id
+    primary_key                = azurerm_log_analytics_workspace.test.primary_shared_key
+  }
+}
+`, template, data.RandomString, data.RandomInteger, data.RandomInteger)
+}

From 5566f35f4e96f21678f61c3e3962e18b128d3330 Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 15:50:29 -0700
Subject: [PATCH 07/48] Azure Monitor for InteractiveQuery cluster

---
 ...ight_interactive_query_cluster_resource.go |  20 +++
 ...interactive_query_cluster_resource_test.go | 168 ++++++++++++++++++
 2 files changed, 188 insertions(+)

diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go
index 716978f9d6b6..078cefaae615 100644
--- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go
@@ -118,6 +118,8 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+
+			"monitor": azure.SchemaHDInsightsMonitor(),
 		},
 	}
 }
@@ -214,6 +216,15 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m
 
 	d.SetId(*read.ID)
 
+	// We can only enable monitoring after creation
+	extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+	if v, ok := d.GetOk("monitor"); ok {
+		monitorRaw := v.([]interface{})
+		if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
+			return err
+		}
+	}
+
 	return resourceArmHDInsightInteractiveQueryClusterRead(d, meta)
 }
 
@@ -283,6 +294,15 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met
 	d.Set("https_endpoint", httpEndpoint)
 	sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints)
 	d.Set("ssh_endpoint", sshEndpoint)
+
+		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+
+		monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name)
+		if err != nil {
+			return fmt.Errorf("Error reading monitor configuration for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+
+		d.Set("monitor", flattenHDInsightMonitoring(monitor))
 	}
 
 	return tags.FlattenAndSet(d, resp.Tags)
diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go
index 67db177e9c21..9469acbeb340 100644
--- a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go
+++ b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go
@@ -226,6 +226,110 @@ func TestAccAzureRMHDInsightInteractiveQueryCluster_tls(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightInteractiveQueryCluster_monitor(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+
resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightInteractiveQueryCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightInteractiveQueryCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) return fmt.Sprintf(` @@ -794,3 
+898,67 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_interactive_query_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + interactive_hive = "2.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From 1c551957223268bcb3a0ddb5f8be795338133d31 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 16:15:50 -0700 Subject: [PATCH 08/48] Azure monitor for Kafka --- .../hdinsight_kafka_cluster_resource.go | 20 +++ .../hdinsight_kafka_cluster_resource_test.go | 169 ++++++++++++++++++ 2 files changed, 189 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index dd04fe22f89c..c72b1914bae3 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -119,6 +119,8 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -215,6 +217,15 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightKafkaClusterRead(d, meta) } @@ -284,6 +295,15 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + 
return fmt.Errorf("Error reading monitor configuration for HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+	}
+
+	d.Set("monitor", flattenHDInsightMonitoring(monitor))
 	}
 
 	return tags.FlattenAndSet(d, resp.Tags)
diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go
index 435d7dd7df0d..9b7fe1b35365 100644
--- a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go
+++ b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go
@@ -229,6 +229,110 @@ func TestAccAzureRMHDInsightKafkaCluster_tls(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightKafkaCluster_monitor(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightKafkaCluster_monitor(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account"),
+		},
+	})
+}
+
+func TestAccAzureRMHDInsightKafkaCluster_updateMonitor(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			// No monitor
+			{
+				Config: testAccAzureRMHDInsightKafkaCluster_basic(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account"),
+			// Add monitor
+			{
+				Config: testAccAzureRMHDInsightKafkaCluster_monitor(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+			data.ImportStep("roles.0.head_node.0.password",
+				"roles.0.head_node.0.vm_size",
+				"roles.0.worker_node.0.password",
+				"roles.0.worker_node.0.vm_size",
+				"roles.0.zookeeper_node.0.password",
+				"roles.0.zookeeper_node.0.vm_size",
+				"storage_account"),
+			// Change Log Analytics Workspace for the monitor
+			{
+				PreConfig: func() {
+					data.RandomString += "new"
+				},
+				Config: testAccAzureRMHDInsightKafkaCluster_monitor(data),
+				Check: resource.ComposeTestCheckFunc(
+
testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightKafkaCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightKafkaCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightKafkaCluster_template(data) return fmt.Sprintf(` @@ -805,3 +909,68 @@ resource "azurerm_hdinsight_kafka_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightKafkaCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightKafkaCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_kafka_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + kafka = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 3 + number_of_disks_per_node = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+    }
+  }
+
+  monitor {
+    log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id
+    primary_key                = azurerm_log_analytics_workspace.test.primary_shared_key
+  }
+}
+`, template, data.RandomString, data.RandomInteger, data.RandomInteger)
+}

From 475d20508cdf5047e99b46c3e1431df064f2e399 Mon Sep 17 00:00:00 2001
From: Konstantin Kosinsky
Date: Tue, 19 May 2020 16:35:44 -0700
Subject: [PATCH 09/48] Azure monitor for Spark

---
 .../hdinsight_spark_cluster_resource.go       |  20 +++
 .../hdinsight_spark_cluster_resource_test.go  | 168 ++++++++++++++++++
 2 files changed, 188 insertions(+)

diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go
index 985d07ac52e4..56da127b6d73 100644
--- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go
+++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go
@@ -118,6 +118,8 @@ func resourceArmHDInsightSparkCluster() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+
+			"monitor": azure.SchemaHDInsightsMonitor(),
 		},
 	}
 }
@@ -214,6 +216,15 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa
 
 	d.SetId(*read.ID)
 
+	// We can only enable monitoring after creation
+	extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+	if v, ok := d.GetOk("monitor"); ok {
+		monitorRaw := v.([]interface{})
+		if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil {
+			return err
+		}
+	}
+
 	return resourceArmHDInsightSparkClusterRead(d, meta)
 }
 
@@ -283,6 +294,15 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface
 	d.Set("https_endpoint", httpEndpoint)
 	sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints)
 	d.Set("ssh_endpoint", sshEndpoint)
+
+		extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient
+
+		monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name)
+		if err != nil {
+			return fmt.Errorf("Error reading monitor configuration for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+
+		d.Set("monitor", flattenHDInsightMonitoring(monitor))
 	}
 
 	return tags.FlattenAndSet(d, resp.Tags)
diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go
index 369e48428eff..af4b298e21c4 100644
--- a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go
+++ b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go
@@ -226,6 +226,110 @@ func TestAccAzureRMHDInsightSparkCluster_tls(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMHDInsightSparkCluster_monitor(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMHDInsightSparkCluster_monitor(data),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMHDInsightClusterExists(data.ResourceName),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"),
+					resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"),
+				),
+			},
+
data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightSparkCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightSparkCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightSparkCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightSparkCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightSparkCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightSparkCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightSparkCluster_template(data) return fmt.Sprintf(` @@ -794,3 +898,67 @@ resource "azurerm_hdinsight_spark_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightSparkCluster_monitor(data acceptance.TestData) string { + template := 
testAccAzureRMHDInsightSparkCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_spark_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + spark = "2.3" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From 416cbb26eec6591d49b06c7b61b92eabe3339720 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 17:02:15 -0700 Subject: [PATCH 10/48] Azure monitor for Storm --- .../hdinsight_storm_cluster_resource.go | 20 +++ .../hdinsight_storm_cluster_resource_test.go | 168 ++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index 22aaff44c613..646eb30d1cbe 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -117,6 +117,8 @@ func resourceArmHDInsightStormCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -212,6 +214,15 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightStormClusterRead(d, meta) } @@ -281,6 +292,15 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d, resp.Tags) diff --git 
a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go index 18b8bdea2338..48a5734b48e3 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go @@ -201,6 +201,110 @@ func TestAccAzureRMHDInsightStormCluster_tls(t *testing.T) { }) } +func TestAccAzureRMHDInsightStormCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightStormCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightStormCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightStormCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightStormCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightStormCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + 
"roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightStormCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightStormCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightStormCluster_template(data) return fmt.Sprintf(` @@ -671,3 +775,67 @@ resource "azurerm_hdinsight_storm_cluster" "test" { } `, template, data.RandomInteger) } + +func testAccAzureRMHDInsightStormCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightStormCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_storm_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + storm = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From 1220e10eea65e9f62fcc66ee2c0a12969aef6497 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 17:24:05 -0700 Subject: [PATCH 11/48] Azure monitor doc for all hdinsight resources --- website/docs/r/hdinsight_hbase_cluster.html.markdown | 10 ++++++++++ .../hdinsight_interactive_query_cluster.html.markdown | 10 ++++++++++ website/docs/r/hdinsight_kafka_cluster.html.markdown | 10 ++++++++++ website/docs/r/hdinsight_spark_cluster.html.markdown | 10 ++++++++++ website/docs/r/hdinsight_storm_cluster.html.markdown | 10 ++++++++++ 5 files changed, 50 insertions(+) diff --git a/website/docs/r/hdinsight_hbase_cluster.html.markdown b/website/docs/r/hdinsight_hbase_cluster.html.markdown index 4c1bb01e8d18..79f5dd774007 100644 --- a/website/docs/r/hdinsight_hbase_cluster.html.markdown +++ b/website/docs/r/hdinsight_hbase_cluster.html.markdown @@ -111,6 +111,8 @@ The following arguments are supported: * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight HBase Cluster. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -233,6 +235,14 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: diff --git a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown index e1fbd7814ab6..e31254d03464 100644 --- a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown +++ b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown @@ -109,6 +109,8 @@ The following arguments are supported: * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Interactive Query Cluster. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -235,6 +237,14 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: diff --git a/website/docs/r/hdinsight_kafka_cluster.html.markdown b/website/docs/r/hdinsight_kafka_cluster.html.markdown index 595a1dae5110..249c31a9df21 100644 --- a/website/docs/r/hdinsight_kafka_cluster.html.markdown +++ b/website/docs/r/hdinsight_kafka_cluster.html.markdown @@ -112,6 +112,8 @@ The following arguments are supported: * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Kafka Cluster. 
+* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -236,6 +238,14 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: diff --git a/website/docs/r/hdinsight_spark_cluster.html.markdown b/website/docs/r/hdinsight_spark_cluster.html.markdown index 6fbe8ab7d4e7..8f725495d4e4 100644 --- a/website/docs/r/hdinsight_spark_cluster.html.markdown +++ b/website/docs/r/hdinsight_spark_cluster.html.markdown @@ -111,6 +111,8 @@ The following arguments are supported: * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Spark Cluster. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -233,6 +235,14 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: diff --git a/website/docs/r/hdinsight_storm_cluster.html.markdown b/website/docs/r/hdinsight_storm_cluster.html.markdown index e36adf352c20..7ff60ac9652c 100644 --- a/website/docs/r/hdinsight_storm_cluster.html.markdown +++ b/website/docs/r/hdinsight_storm_cluster.html.markdown @@ -109,6 +109,8 @@ The following arguments are supported: * `tags` - (Optional) A map of Tags which should be assigned to this HDInsight Storm Cluster. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -219,6 +221,14 @@ A `zookeeper_node` block supports the following: * `virtual_network_id` - (Optional) The ID of the Virtual Network where the Zookeeper Nodes should be provisioned within. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. 
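+Neither argument takes the workspace's Azure resource ID: the acceptance tests in this series populate `log_analytics_workspace_id` from the `workspace_id` attribute of an `azurerm_log_analytics_workspace` (the workspace GUID) and `primary_key` from its `primary_shared_key` attribute.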
+ ## Attributes Reference The following attributes are exported: From abdd57ac37c9a7243b23f9747a76030993e859e7 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Thu, 21 May 2020 09:21:51 -0700 Subject: [PATCH 12/48] Ensure that basic and monitor tests use the same roles --- .../tests/hdinsight_hbase_cluster_resource_test.go | 6 +++--- ...dinsight_interactive_query_cluster_resource_test.go | 4 ++-- .../tests/hdinsight_kafka_cluster_resource_test.go | 10 +++++----- .../tests/hdinsight_spark_cluster_resource_test.go | 8 ++++---- .../tests/hdinsight_storm_cluster_resource_test.go | 8 ++++---- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go index e37e0c9b6a98..12c796073280 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go @@ -936,20 +936,20 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" target_instance_count = 2 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go index 9469acbeb340..ad4773867f4e 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go @@ -942,14 +942,14 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { } worker_node { - vm_size = "Standard_D13_V2" + vm_size = "Standard_D14_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" target_instance_count = 2 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go index 9b7fe1b35365..49a53444240c 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go @@ -947,21 +947,21 @@ resource "azurerm_hdinsight_kafka_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" - username = "acctestusrvm" - password = "AccTestvdSC4daf986!" + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" target_instance_count = 3 number_of_disks_per_node = 2 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" 
} diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go index af4b298e21c4..dfd0cf7bdbcb 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go @@ -936,20 +936,20 @@ resource "azurerm_hdinsight_spark_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" - target_instance_count = 2 + target_instance_count = 3 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Medium" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go index 48a5734b48e3..3fcdd053f3b6 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go @@ -813,20 +813,20 @@ resource "azurerm_hdinsight_storm_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" - target_instance_count = 2 + target_instance_count = 3 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" 
} From 1a16f349c6bffb9335acb7bba45f159c64413f1d Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Thu, 21 May 2020 12:38:27 -0700 Subject: [PATCH 13/48] Fixing linter errors --- azurerm/internal/services/hdinsight/common_hdinsight.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index 2f1586cf2ed7..60df46b62cbb 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -114,10 +114,8 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { return err } - } else { - if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { - return nil - } + } else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { + return nil } } From 1bff630e95f61e0da1d4e75f90dbab62182a59f9 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 30 Jun 2020 13:26:32 -0700 Subject: [PATCH 14/48] Fix typo in debug output Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- azurerm/internal/services/hdinsight/common_hdinsight.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index 7db2d00e576a..3b150d1e4281 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -107,7 +107,7 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema } if d.HasChange("monitor") { - log.Printf("[DEBUG] Chnage Azure Monitor for the HDInsight %q Cluster", clusterKind) + log.Printf("[DEBUG] Change Azure Monitor for the HDInsight %q Cluster", clusterKind) extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) From 07b762437014a32f6c5baff4c4ba73877c3d1db1 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Wed, 1 Jul 2020 14:18:45 -0700 Subject: [PATCH 15/48] Moved extensionsClient --- .../services/hdinsight/hdinsight_hadoop_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_hbase_cluster_resource.go | 5 ++--- .../hdinsight_interactive_query_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_kafka_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_spark_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_storm_cluster_resource.go | 5 ++--- 6 files changed, 12 insertions(+), 18 deletions(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 6a1c7e997f04..ded213eba7f5 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -187,6 +187,7 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() 
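An editor's note on the update path that patches 13 through 16 of this series converge on: everything keys off d.HasChange("monitor"), and the acceptance tests assume that re-issuing EnableMonitoring against a new workspace is enough to re-point an already-monitored cluster, with no intermediate disable. Spelled out with this series' helper names (a sketch, not additional patch content):

if d.HasChange("monitor") {
	if v, ok := d.GetOk("monitor"); ok {
		// Covers both transitions that leave a monitor block in config:
		// enabling monitoring for the first time, and re-enabling it against
		// a changed Log Analytics workspace or key.
		if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, v.([]interface{})); err != nil {
			return err
		}
	} else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil {
		// The monitor block was removed from config; patch 16 makes this
		// branch return the error instead of swallowing it.
		return err
	}
}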
@@ -311,7 +312,6 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -325,6 +325,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -411,8 +412,6 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index f250752196e7..5d781933025a 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -125,6 +125,7 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource { func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -216,7 +217,6 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -230,6 +230,7 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -294,8 +295,6 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop 
Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index b9e1f3b5c476..fee52b798c47 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -125,6 +125,7 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -216,7 +217,6 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -230,6 +230,7 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -294,8 +295,6 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index 995436890bb2..05009422494a 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -126,6 +126,7 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource { func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -217,7 +218,6 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -231,6 +231,7 @@ func 
resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -295,8 +296,6 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index 5eaabbda3b32..8e8906875b74 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -125,6 +125,7 @@ func resourceArmHDInsightSparkCluster() *schema.Resource { func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -216,7 +217,6 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -230,6 +230,7 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -294,8 +295,6 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index c3d0fe5369dd..8337f1b74bcb 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ 
-123,6 +123,7 @@ func resourceArmHDInsightStormCluster() *schema.Resource { func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -213,7 +214,6 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -227,6 +227,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -291,8 +292,6 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) From 1a470985b9b8e668fb7e8867ec720080d53bd1d8 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Wed, 1 Jul 2020 14:25:26 -0700 Subject: [PATCH 16/48] Return error if disabling monitor fails --- azurerm/internal/services/hdinsight/common_hdinsight.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index 3b150d1e4281..f3fda5dfba4d 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -115,7 +115,7 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema return err } } else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { - return nil + return err } } if d.HasChange("gateway") { From 1c451ffa5893bd199ca848615737428bb569ae1e Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:08:06 -0700 Subject: [PATCH 17/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../hdinsight/hdinsight_interactive_query_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index fee52b798c47..b640ed423fcf 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -297,7 +297,7 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d 
*schema.ResourceData, met monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From 8227f7c0a06bbff3a5f4fe7bd38b08cf0b263156 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:08:34 -0700 Subject: [PATCH 18/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_storm_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index 8337f1b74bcb..f79c958c4231 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -294,7 +294,7 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From cf39c282f7274a509021949f0bb632bda70b1bdc Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:08:47 -0700 Subject: [PATCH 19/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_spark_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index 8e8906875b74..f286afbb753d 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -297,7 +297,7 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From e2a7c8568996463d0e3265d886e5b00789899991 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:09:00 -0700 Subject: [PATCH 20/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_kafka_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index 05009422494a..b41d28e6dc2d 100644 --- 
a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -298,7 +298,7 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From 7dc5092ce38963baa952f270700ab697af72e894 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:09:14 -0700 Subject: [PATCH 21/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_hbase_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index 5d781933025a..03bf17de1c6d 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -297,7 +297,7 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From 186a09cd66c98c6c86c24f4d81f9438d3f6ba054 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:10:49 -0700 Subject: [PATCH 22/48] Error messages --- .../services/hdinsight/hdinsight_hadoop_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index ded213eba7f5..825b5873578c 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -414,7 +414,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From b2365c1f01405fb9dbf378e16bd5b62c7820c83f Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 16:32:49 -0700 Subject: [PATCH 23/48] Remove redundant ForceNew --- azurerm/helpers/azure/hdinsight.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index b6a9082e64a4..ec575341e5aa 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ 
b/azurerm/helpers/azure/hdinsight.go @@ -174,12 +174,10 @@ func SchemaHDInsightsMonitor() *schema.Schema { "log_analytics_workspace_id": { Type: schema.TypeString, Required: true, - ForceNew: false, }, "primary_key": { Type: schema.TypeString, Required: true, - ForceNew: false, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, // Azure doesn't return the key From 6738651e93c4046d790e021004ed63d04fc8a200 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Mon, 18 May 2020 19:22:24 -0700 Subject: [PATCH 24/48] Enable monitor on create --- azurerm/helpers/azure/hdinsight.go | 40 ++++++++ .../services/hdinsight/client/client.go | 5 + .../hdinsight_hadoop_cluster_resource.go | 32 +++++++ .../hdinsight_hadoop_cluster_resource_test.go | 93 +++++++++++++++++++ 4 files changed, 170 insertions(+) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 9b90c792b218..9322ac029eee 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -181,6 +181,34 @@ func SchemaHDInsightsExternalMetastores() *schema.Schema { } } +func SchemaHDInsightsMonitor() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_analytics_workspace_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "primary_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + ValidateFunc: validation.StringIsNotEmpty, + // Azure doesn't return the key + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return (new == d.Get(k).(string)) && (old == "*****") + }, + }, + }, + }, + } +} + func ExpandHDInsightsConfigurations(input []interface{}) map[string]interface{} { vs := input[0].(map[string]interface{}) @@ -278,6 +306,18 @@ func ExpandHDInsightsAmbariMetastore(input []interface{}) map[string]interface{} } } +func ExpandHDInsightsMonitor(input []interface{}) hdinsight.ClusterMonitoringRequest { + vs := input[0].(map[string]interface{}) + + workspace := vs["log_analytics_workspace_id"].(string) + key := vs["primary_key"].(string) + + return hdinsight.ClusterMonitoringRequest{ + WorkspaceID: utils.String(workspace), + PrimaryKey: utils.String(key), + } +} + func FlattenHDInsightsConfigurations(input map[string]*string) []interface{} { enabled := true diff --git a/azurerm/internal/services/hdinsight/client/client.go b/azurerm/internal/services/hdinsight/client/client.go index 9f96cfdc1559..afb902779c0c 100644 --- a/azurerm/internal/services/hdinsight/client/client.go +++ b/azurerm/internal/services/hdinsight/client/client.go @@ -9,6 +9,7 @@ type Client struct { ApplicationsClient *hdinsight.ApplicationsClient ClustersClient *hdinsight.ClustersClient ConfigurationsClient *hdinsight.ConfigurationsClient + ExtensionsClient *hdinsight.ExtensionsClient } func NewClient(o *common.ClientOptions) *Client { @@ -21,9 +22,13 @@ func NewClient(o *common.ClientOptions) *Client { ConfigurationsClient := hdinsight.NewConfigurationsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) o.ConfigureClient(&ConfigurationsClient.Client, o.ResourceManagerAuthorizer) + ExtensionsClient := hdinsight.NewExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId) + o.ConfigureClient(&ExtensionsClient.Client, o.ResourceManagerAuthorizer) + return &Client{ ApplicationsClient: &ApplicationsClient, ClustersClient: &ClustersClient, ConfigurationsClient: 
&ConfigurationsClient, + ExtensionsClient: &ExtensionsClient, } } diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 9739ce076a5a..7bd2db9262de 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -166,6 +166,8 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -294,6 +296,21 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } } + // We can only enable monitoring after creation + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + monitor := azure.ExpandHDInsightsMonitor(monitorRaw) + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + future, err := extensionsClient.EnableMonitoring(ctx, resourceGroup, name, monitor) + if err != nil { + return err + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + } + return resourceArmHDInsightHadoopClusterRead(d, meta) } @@ -385,6 +402,21 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + if *monitor.ClusterMonitoringEnabled { + d.Set("monitor", []interface{}{ + map[string]string{ + "log_analytics_workspace_id": *monitor.WorkspaceID, + "primary_key": "****", + }}) + } } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go index 835443ed53f0..b1804f7ed001 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go @@ -435,6 +435,34 @@ func TestAccAzureRMHDInsightHadoopCluster_updateMetastore(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHadoopCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + 
"roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + + + func TestAccAzureRMHDInsightHadoopCluster_updateGateway(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") resource.ParallelTest(t, resource.TestCase{ @@ -476,6 +504,7 @@ func TestAccAzureRMHDInsightHadoopCluster_updateGateway(t *testing.T) { }) } + func testAccAzureRMHDInsightHadoopCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightHadoopCluster_template(data) return fmt.Sprintf(` @@ -1357,6 +1386,70 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { `, template, data.RandomInteger, data.RandomInteger) } +func testAccAzureRMHDInsightHadoopCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightHadoopCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_hadoop_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + hadoop = "2.7" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomInteger, data.RandomInteger) +} + func testAccAzureRMHDInsightHadoopCluster_updateGateway(data acceptance.TestData) string { template := testAccAzureRMHDInsightHadoopCluster_template(data) return fmt.Sprintf(` From 930b73b3f2a9833757fbc19567ffc5b1c58474c7 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 11:48:16 -0700 Subject: [PATCH 25/48] Implement monitor changes --- azurerm/helpers/azure/hdinsight.go | 4 +- .../services/hdinsight/common_hdinsight.go | 44 +++++++++- .../hdinsight_hadoop_cluster_resource.go | 13 +-- .../hdinsight_hadoop_cluster_resource_test.go | 83 ++++++++++++++++++- 4 files changed, 128 insertions(+), 16 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 9322ac029eee..b0af66f993a5 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -191,12 +191,12 @@ func SchemaHDInsightsMonitor() *schema.Schema { "log_analytics_workspace_id": { Type: schema.TypeString, Required: true, - ForceNew: true, + ForceNew: false, }, "primary_key": { Type: schema.TypeString, Required: true, - ForceNew: true, + ForceNew: false, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, // Azure doesn't return the key diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index 4d11f31d2572..c167d5b988b7 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -106,6 +106,7 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema } } if d.HasChange("gateway") { log.Printf("[DEBUG] Updating the HDInsight %q Cluster gateway", clusterKind) vs := d.Get("gateway").([]interface{})[0].(map[string]interface{}) @@ -128,7 +129,21 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) Gateway to be updated: %s", name, resourceGroup, err) } } - + if d.HasChange("monitor") { + log.Printf("[DEBUG] Change Azure Monitor for the HDInsight %q Cluster", clusterKind) + fmt.Println("Monitor change") + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } else { + if err := disableMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { + return nil + } + } + } return readFunc(d, meta) } } @@ -351,3 +366,30 @@ func flattenHDInsightsMetastores(d *schema.ResourceData, configurations map[stri }) } } + +func enableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error { + monitor := azure.ExpandHDInsightsMonitor(input) + future, err := client.EnableMonitoring(ctx, resourceGroup, name, monitor) + if err != nil { + return err + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + return nil +} + +func disableMonitoring(ctx context.Context, client
*hdinsight.ExtensionsClient, resourceGroup, name string) error { + future, err := client.DisableMonitoring(ctx, resourceGroup, name) + if err != nil { + return err + } + + if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { + return fmt.Errorf("Error waiting for disabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + return nil +} diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 7bd2db9262de..02429a0a2bd3 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -178,6 +178,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf defer cancel() name := d.Get("name").(string) + fmt.Printf("Create cluster %q", name) resourceGroup := d.Get("resource_group_name").(string) location := azure.NormalizeLocation(d.Get("location").(string)) clusterVersion := d.Get("cluster_version").(string) @@ -297,18 +298,12 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) - monitor := azure.ExpandHDInsightsMonitor(monitorRaw) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - future, err := extensionsClient.EnableMonitoring(ctx, resourceGroup, name, monitor) - if err != nil { + if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { return err } - - if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) - } } return resourceArmHDInsightHadoopClusterRead(d, meta) @@ -414,7 +409,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac d.Set("monitor", []interface{}{ map[string]string{ "log_analytics_workspace_id": *monitor.WorkspaceID, - "primary_key": "****", + "primary_key": "*****", }}) } } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go index b1804f7ed001..d4dd6eaca4f7 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hadoop_cluster_resource_test.go @@ -461,8 +461,6 @@ func TestAccAzureRMHDInsightHadoopCluster_monitor(t *testing.T) { }) } - - func TestAccAzureRMHDInsightHadoopCluster_updateGateway(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") resource.ParallelTest(t, resource.TestCase{ @@ -504,6 +502,83 @@ func TestAccAzureRMHDInsightHadoopCluster_updateGateway(t *testing.T) { }) } +func TestAccAzureRMHDInsightHadoopCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hadoop_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: 
testAccAzureRMHDInsightHadoopCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightHadoopCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightHadoopCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightHadoopCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} func testAccAzureRMHDInsightHadoopCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightHadoopCluster_template(data) @@ -1392,7 +1467,7 @@ func testAccAzureRMHDInsightHadoopCluster_monitor(data acceptance.TestData) stri %s resource "azurerm_log_analytics_workspace" "test" { - name = "acctestLAW-%d" + name = "acctestLAW-%s-%d" location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name sku = "PerGB2018" @@ -1447,7 +1522,7 @@ resource "azurerm_hdinsight_hadoop_cluster" "test" { primary_key = azurerm_log_analytics_workspace.test.primary_shared_key } } -`, template, data.RandomInteger, data.RandomInteger) +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) } func testAccAzureRMHDInsightHadoopCluster_updateGateway(data acceptance.TestData) string { From 42d8cf0ac6ff371d263cc2076ea3e1d665454a46 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 11:53:13 -0700 Subject: [PATCH 26/48] Error message clean up --- azurerm/internal/services/hdinsight/common_hdinsight.go | 5 ++--- 
.../services/hdinsight/hdinsight_hadoop_cluster_resource.go | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index c167d5b988b7..af9c70300e59 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -131,7 +131,6 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema } if d.HasChange("monitor") { log.Printf("[DEBUG] Changing Azure Monitor for the HDInsight %q Cluster", clusterKind) - fmt.Println("Monitor change") extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) @@ -375,7 +374,7 @@ func enableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, r } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for enabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("Error waiting for enabling monitor for HDInsight Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } return nil @@ -388,7 +387,7 @@ func disableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for disabling monitor for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("Error waiting for disabling monitor for HDInsight Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } return nil diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 02429a0a2bd3..5b81296034ff 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -178,7 +178,6 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf defer cancel() name := d.Get("name").(string) - fmt.Printf("Create cluster %q", name) resourceGroup := d.Get("resource_group_name").(string) location := azure.NormalizeLocation(d.Get("location").(string)) clusterVersion := d.Get("cluster_version").(string) From 85824a6a4930732c9c6fdfd24d257a04f189bf80 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 12:01:48 -0700 Subject: [PATCH 27/48] Documentation for hadoop --- website/docs/r/hdinsight_hadoop_cluster.html.markdown | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/website/docs/r/hdinsight_hadoop_cluster.html.markdown b/website/docs/r/hdinsight_hadoop_cluster.html.markdown index c3135adcbf34..b7e1c61871d5 100644 --- a/website/docs/r/hdinsight_hadoop_cluster.html.markdown +++ b/website/docs/r/hdinsight_hadoop_cluster.html.markdown @@ -112,6 +112,8 @@ The following arguments are supported: * `metastores` - (Optional) A `metastores` block as defined below. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -299,6 +301,13 @@ An `ambari` block supports the following: * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created.
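+
+As a minimal sketch, the `monitor` block documented below can be wired to a Log Analytics workspace like so (the `example` resource names are illustrative placeholders, and the other required cluster arguments are omitted):
+
+```hcl
+resource "azurerm_log_analytics_workspace" "example" {
+  name                = "example-law"
+  location            = azurerm_resource_group.example.location
+  resource_group_name = azurerm_resource_group.example.name
+  sku                 = "PerGB2018"
+}
+
+resource "azurerm_hdinsight_hadoop_cluster" "example" {
+  # ... all other required cluster arguments ...
+
+  monitor {
+    log_analytics_workspace_id = azurerm_log_analytics_workspace.example.workspace_id
+    primary_key                = azurerm_log_analytics_workspace.example.primary_shared_key
+  }
+}
+```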
+--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference From 21e9d99d66c93222fe76df7730cb8b7b07250ab1 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 13:24:44 -0700 Subject: [PATCH 28/48] A bit of refactoring --- .../services/hdinsight/common_hdinsight.go | 20 +++++++++++++++---- .../hdinsight_hadoop_cluster_resource.go | 10 ++-------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index af9c70300e59..1176ea1d88f2 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -134,11 +134,11 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) - if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { return err } } else { - if err := disableMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { + if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { return err } } @@ -366,7 +366,19 @@ func flattenHDInsightsMetastores(d *schema.ResourceData, configurations map[stri } } -func enableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error { +func flattenHDInsightMonitoring(monitor hdinsight.ClusterMonitoringResponse) []interface{} { + if *monitor.ClusterMonitoringEnabled { + return []interface{}{ + map[string]string{ + "log_analytics_workspace_id": *monitor.WorkspaceID, + "primary_key": "*****", + }} + } + + return nil +} + +func enableHDInsightMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error { monitor := azure.ExpandHDInsightsMonitor(input) future, err := client.EnableMonitoring(ctx, resourceGroup, name, monitor) if err != nil { @@ -380,7 +392,7 @@ return nil } -func disableMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string) error { +func disableHDInsightMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string) error { future, err := client.DisableMonitoring(ctx, resourceGroup, name) if err != nil { return err diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 5b81296034ff..24b79c0cd13f 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -300,7 +300,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) - if err := enableMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + if err :=
enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { return err } } @@ -404,13 +404,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac return fmt.Errorf("Error reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - if *monitor.ClusterMonitoringEnabled { - d.Set("monitor", []interface{}{ - map[string]string{ - "log_analytics_workspace_id": *monitor.WorkspaceID, - "primary_key": "*****", - }}) - } + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d, resp.Tags) From c9df321facb03bf495712cffe6bdd6f615783136 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 15:27:02 -0700 Subject: [PATCH 29/48] Azure monitor for HBase cluster --- .../hdinsight_hbase_cluster_resource.go | 20 +++ .../hdinsight_hbase_cluster_resource_test.go | 168 ++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index 7e1812b7e1a2..7987ace6872b 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -119,6 +119,8 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -221,6 +223,15 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightHBaseClusterRead(d, meta) } @@ -298,6 +309,15 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuration for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go index 85e50592c7b8..6e64687af4d3 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go @@ -321,6 +321,110 @@ func TestAccAzureRMHDInsightHBaseCluster_updateMetastore(t *testing.T) { }) } +func TestAccAzureRMHDInsightHBaseCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy:
testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightHBaseCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightHBaseCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_hbase_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightHBaseCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightHBaseCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightHBaseCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightHBaseCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func 
testAccAzureRMHDInsightHBaseCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightHBaseCluster_template(data) return fmt.Sprintf(` @@ -1072,3 +1176,67 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { } `, template, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMHDInsightHBaseCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightHBaseCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_hbase_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + hbase = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From b5cf7446756f15e3bb229ee33053a21ee46c24b4 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 15:50:29 -0700 Subject: [PATCH 30/48] Azure Monitor for InteractiveQuery cluster --- ...ight_interactive_query_cluster_resource.go | 20 +++ ...interactive_query_cluster_resource_test.go | 168 ++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index 07f9d4507c1e..354b15bf39ef 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -119,6 +119,8 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -221,6 +223,15 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightInteractiveQueryClusterRead(d, meta) } @@ -298,6 +309,15 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", 
props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuration for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go index 947a8d1134c9..7e2437f04371 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go @@ -321,6 +321,110 @@ func TestAccAzureRMHDInsightInteractiveQueryCluster_updateMetastore(t *testing.T }) } +func TestAccAzureRMHDInsightInteractiveQueryCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightInteractiveQueryCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_interactive_query_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", +
"roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightInteractiveQueryCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightInteractiveQueryCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) return fmt.Sprintf(` @@ -1072,3 +1176,67 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { } `, template, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMHDInsightInteractiveQueryCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightInteractiveQueryCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_interactive_query_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + interactive_hive = "2.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D13_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From 5fca7ae8c6aa36b1fec34ed1d0db98edb3594419 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 16:15:50 -0700 Subject: [PATCH 31/48] Azure monitor for Kafka --- .../hdinsight_kafka_cluster_resource.go | 20 +++ .../hdinsight_kafka_cluster_resource_test.go | 169 ++++++++++++++++++ 2 files changed, 189 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index 1c7d1974305d..aa0c2ad2ea5d 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -120,6 +120,8 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -222,6 +224,15 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightKafkaClusterRead(d, meta) } @@ -299,6 +310,15 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go index c4433ca0f86c..acd143abf077 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go @@ -324,6 +324,110 @@ func TestAccAzureRMHDInsightKafkaCluster_updateMetastore(t *testing.T) { }) } +func TestAccAzureRMHDInsightKafkaCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightKafkaCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + 
data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightKafkaCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_kafka_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightKafkaCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightKafkaCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightKafkaCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightKafkaCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightKafkaCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightKafkaCluster_template(data) return fmt.Sprintf(` @@ -1085,3 +1189,68 @@ resource "azurerm_hdinsight_kafka_cluster" "test" { } `, template, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMHDInsightKafkaCluster_monitor(data acceptance.TestData) string { + template := 
testAccAzureRMHDInsightKafkaCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_kafka_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + kafka = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 3 + number_of_disks_per_node = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From b1609cde072d4e81d3866f1c017933adb18cc1b4 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 16:35:44 -0700 Subject: [PATCH 32/48] Azure monitor for Spark --- .../hdinsight_spark_cluster_resource.go | 20 +++ .../hdinsight_spark_cluster_resource_test.go | 168 ++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index 38e3d078e4e5..32a57ac949fd 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -119,6 +119,8 @@ func resourceArmHDInsightSparkCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -221,6 +223,15 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightSparkClusterRead(d, meta) } @@ -298,6 +309,15 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuration for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d,
resp.Tags) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go index 1be3c4c8742e..65c0b913c02c 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go @@ -321,6 +321,110 @@ func TestAccAzureRMHDInsightSparkCluster_updateMetastore(t *testing.T) { }) } +func TestAccAzureRMHDInsightSparkCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightSparkCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightSparkCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_spark_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightSparkCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightSparkCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightSparkCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + 
"roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightSparkCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightSparkCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightSparkCluster_template(data) return fmt.Sprintf(` @@ -1072,3 +1176,67 @@ resource "azurerm_hdinsight_spark_cluster" "test" { } `, template, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMHDInsightSparkCluster_monitor(data acceptance.TestData) string { + template := testAccAzureRMHDInsightSparkCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_spark_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + spark = "2.3" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" 
+ } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From 36f4e18118d4ce236766143f872513018191743a Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 17:02:15 -0700 Subject: [PATCH 33/48] Azure monitor for Storm --- .../hdinsight_storm_cluster_resource.go | 20 +++ .../hdinsight_storm_cluster_resource_test.go | 168 ++++++++++++++++++ 2 files changed, 188 insertions(+) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index bffffa1c149a..3376c44f1903 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -117,6 +117,8 @@ func resourceArmHDInsightStormCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "monitor": azure.SchemaHDInsightsMonitor(), }, } } @@ -218,6 +220,15 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) + // We can only enable monitoring after creation + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } + return resourceArmHDInsightStormClusterRead(d, meta) } @@ -295,6 +306,15 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface d.Set("https_endpoint", httpEndpoint) sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) + + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + + monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) + if err != nil { + return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + d.Set("monitor", flattenHDInsightMonitoring(monitor)) } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go index acbae46f87a9..061829645ada 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go @@ -296,6 +296,110 @@ func TestAccAzureRMHDInsightStormCluster_updateMetastore(t *testing.T) { }) } +func TestAccAzureRMHDInsightStormCluster_monitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + { + Config: testAccAzureRMHDInsightStormCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + 
data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + +func TestAccAzureRMHDInsightStormCluster_updateMonitor(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_hdinsight_storm_cluster", "test") + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMHDInsightClusterDestroy(data.ResourceType), + Steps: []resource.TestStep{ + // No monitor + { + Config: testAccAzureRMHDInsightStormCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Add monitor + { + Config: testAccAzureRMHDInsightStormCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Change Log Analytics Workspace for the monitor + { + PreConfig: func() { + data.RandomString += "new" + }, + Config: testAccAzureRMHDInsightStormCluster_monitor(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + // Remove monitor + { + Config: testAccAzureRMHDInsightStormCluster_basic(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMHDInsightClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "https_endpoint"), + resource.TestCheckResourceAttrSet(data.ResourceName, "ssh_endpoint"), + ), + }, + data.ImportStep("roles.0.head_node.0.password", + "roles.0.head_node.0.vm_size", + "roles.0.worker_node.0.password", + "roles.0.worker_node.0.vm_size", + "roles.0.zookeeper_node.0.password", + "roles.0.zookeeper_node.0.vm_size", + "storage_account"), + }, + }) +} + func testAccAzureRMHDInsightStormCluster_basic(data acceptance.TestData) string { template := testAccAzureRMHDInsightStormCluster_template(data) return fmt.Sprintf(` @@ -949,3 +1053,67 @@ resource "azurerm_hdinsight_storm_cluster" "test" { } `, template, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMHDInsightStormCluster_monitor(data acceptance.TestData) string { + template := 
testAccAzureRMHDInsightStormCluster_template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_log_analytics_workspace" "test" { + name = "acctestLAW-%s-%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + sku = "PerGB2018" +} + +resource "azurerm_hdinsight_storm_cluster" "test" { + name = "acctesthdi-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + cluster_version = "3.6" + tier = "Standard" + + component_version { + storm = "1.1" + } + + gateway { + enabled = true + username = "acctestusrgw" + password = "TerrAform123!" + } + + storage_account { + storage_container_id = azurerm_storage_container.test.id + storage_account_key = azurerm_storage_account.test.primary_access_key + is_default = true + } + + roles { + head_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + + worker_node { + vm_size = "Standard_D4_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + target_instance_count = 2 + } + + zookeeper_node { + vm_size = "Standard_D3_v2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" + } + } + + monitor { + log_analytics_workspace_id = azurerm_log_analytics_workspace.test.workspace_id + primary_key = azurerm_log_analytics_workspace.test.primary_shared_key + } +} +`, template, data.RandomString, data.RandomInteger, data.RandomInteger) +} From 972c1b74dd02798290a64d814c5a0659ab9584db Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 19 May 2020 17:24:05 -0700 Subject: [PATCH 34/48] Azure monitor doc for all hdinsight resources --- website/docs/r/hdinsight_hbase_cluster.html.markdown | 12 ++++++++++-- ...hdinsight_interactive_query_cluster.html.markdown | 12 ++++++++++-- website/docs/r/hdinsight_kafka_cluster.html.markdown | 12 ++++++++++-- website/docs/r/hdinsight_spark_cluster.html.markdown | 11 +++++++++-- website/docs/r/hdinsight_storm_cluster.html.markdown | 12 ++++++++++-- 5 files changed, 49 insertions(+), 10 deletions(-) diff --git a/website/docs/r/hdinsight_hbase_cluster.html.markdown b/website/docs/r/hdinsight_hbase_cluster.html.markdown index 08ced804b8b4..2e3a02b58f41 100644 --- a/website/docs/r/hdinsight_hbase_cluster.html.markdown +++ b/website/docs/r/hdinsight_hbase_cluster.html.markdown @@ -112,6 +112,8 @@ The following arguments are supported: * `metastores` - (Optional) A `metastores` block as defined below. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -281,6 +283,14 @@ An `ambari` block supports the following: * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: @@ -293,8 +303,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the HBase HDInsight Cluster. 
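+
+The same `monitor` block applies to each HDInsight cluster resource documented in this patch; a minimal sketch (the `example` names are illustrative, mirroring the workspace attributes used by the acceptance tests):
+
+```hcl
+monitor {
+  log_analytics_workspace_id = azurerm_log_analytics_workspace.example.workspace_id
+  primary_key                = azurerm_log_analytics_workspace.example.primary_shared_key
+}
+```
+
+-> **NOTE:** Removing the `monitor` block disables monitoring on the next apply, while changing the workspace re-runs the enable call against the new workspace; neither change forces a new cluster to be created.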
diff --git a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown index e6d6460a3ba2..e453ec7f4d43 100644 --- a/website/docs/r/hdinsight_interactive_query_cluster.html.markdown +++ b/website/docs/r/hdinsight_interactive_query_cluster.html.markdown @@ -111,6 +111,8 @@ The following arguments are supported: * `metastores` - (Optional) A `metastores` block as defined below. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -284,6 +286,14 @@ An `ambari` block supports the following: * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: @@ -296,8 +306,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the Interactive Query HDInsight Cluster. diff --git a/website/docs/r/hdinsight_kafka_cluster.html.markdown b/website/docs/r/hdinsight_kafka_cluster.html.markdown index 93bbcba60962..219c078874de 100644 --- a/website/docs/r/hdinsight_kafka_cluster.html.markdown +++ b/website/docs/r/hdinsight_kafka_cluster.html.markdown @@ -113,6 +113,8 @@ The following arguments are supported: * `metastores` - (Optional) A `metastores` block as defined below. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -284,6 +286,14 @@ An `ambari` block supports the following: * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: @@ -296,8 +306,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the Kafka HDInsight Cluster. diff --git a/website/docs/r/hdinsight_spark_cluster.html.markdown b/website/docs/r/hdinsight_spark_cluster.html.markdown index 771c6ad1ef40..2cd8e633090f 100644 --- a/website/docs/r/hdinsight_spark_cluster.html.markdown +++ b/website/docs/r/hdinsight_spark_cluster.html.markdown @@ -112,6 +112,8 @@ The following arguments are supported: * `metastores` - (Optional) A `metastores` block as defined below. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -281,6 +283,13 @@ An `ambari` block supports the following: * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created. 
+--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. ## Attributes Reference @@ -294,8 +303,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the Spark HDInsight Cluster. diff --git a/website/docs/r/hdinsight_storm_cluster.html.markdown b/website/docs/r/hdinsight_storm_cluster.html.markdown index ace3366c4d6c..f82c7b7246b2 100644 --- a/website/docs/r/hdinsight_storm_cluster.html.markdown +++ b/website/docs/r/hdinsight_storm_cluster.html.markdown @@ -110,6 +110,8 @@ The following arguments are supported: * `metastores` - (Optional) A `metastores` block as defined below. +* `monitor` - (Optional) A `monitor` block as defined below. + --- A `component_version` block supports the following: @@ -267,6 +269,14 @@ An `ambari` block supports the following: * `password` - (Required) The external Ambari metastore's existing SQL server admin password. Changing this forces a new resource to be created. +--- + +A `monitor` block supports the following: + +* `log_analytics_workspace_id` - (Required) The Operations Management Suite (OMS) workspace ID. + +* `primary_key` - (Required) The Operations Management Suite (OMS) workspace key. + ## Attributes Reference The following attributes are exported: @@ -279,8 +289,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the Storm HDInsight Cluster. From da5845f76052dad821900dec10024ad0a6415cec Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Thu, 21 May 2020 09:21:51 -0700 Subject: [PATCH 35/48] Ensure that basic and monitor tests use the same roles --- .../tests/hdinsight_hbase_cluster_resource_test.go | 6 +++--- ...dinsight_interactive_query_cluster_resource_test.go | 4 ++-- .../tests/hdinsight_kafka_cluster_resource_test.go | 10 +++++----- .../tests/hdinsight_spark_cluster_resource_test.go | 8 ++++---- .../tests/hdinsight_storm_cluster_resource_test.go | 8 ++++---- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go index 6e64687af4d3..da733bd9ca6a 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_hbase_cluster_resource_test.go @@ -1214,20 +1214,20 @@ resource "azurerm_hdinsight_hbase_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" target_instance_count = 2 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" 
} diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go index 7e2437f04371..23e6c6250d3a 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_interactive_query_cluster_resource_test.go @@ -1220,14 +1220,14 @@ resource "azurerm_hdinsight_interactive_query_cluster" "test" { } worker_node { - vm_size = "Standard_D13_V2" + vm_size = "Standard_D14_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" target_instance_count = 2 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go index acd143abf077..a49218352ff6 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_kafka_cluster_resource_test.go @@ -1227,21 +1227,21 @@ resource "azurerm_hdinsight_kafka_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" - username = "acctestusrvm" - password = "AccTestvdSC4daf986!" + vm_size = "Standard_D3_V2" + username = "acctestusrvm" + password = "AccTestvdSC4daf986!" target_instance_count = 3 number_of_disks_per_node = 2 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_D3_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go index 65c0b913c02c..72d2d0ff76ff 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_spark_cluster_resource_test.go @@ -1214,20 +1214,20 @@ resource "azurerm_hdinsight_spark_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" - target_instance_count = 2 + target_instance_count = 3 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Medium" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } diff --git a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go index 061829645ada..4fa433e833dd 100644 --- a/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go +++ b/azurerm/internal/services/hdinsight/tests/hdinsight_storm_cluster_resource_test.go @@ -1091,20 +1091,20 @@ resource "azurerm_hdinsight_storm_cluster" "test" { roles { head_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } worker_node { - vm_size = "Standard_D4_V2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" 
- target_instance_count = 2 + target_instance_count = 3 } zookeeper_node { - vm_size = "Standard_D3_v2" + vm_size = "Standard_A4_V2" username = "acctestusrvm" password = "AccTestvdSC4daf986!" } From 0b96308fc28c60db62b5d82607115d0fe108a883 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Thu, 21 May 2020 12:38:27 -0700 Subject: [PATCH 36/48] Fixing linter errors --- azurerm/internal/services/hdinsight/common_hdinsight.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index 1176ea1d88f2..ec0c969b07d0 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -137,10 +137,8 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { return err } - } else { - if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { - return nil - } + } else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { + return nil } } return readFunc(d, meta) From dfa757a22c18b5d5564ea2b2b52c1aa7bc01935f Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 30 Jun 2020 13:26:32 -0700 Subject: [PATCH 37/48] Fix typo in debug output Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/common_hdinsight.go | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index ec0c969b07d0..b6d9d4ee9142 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -106,7 +106,19 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema } } - //<<<<<<< HEAD + if d.HasChange("monitor") { + log.Printf("[DEBUG] Change Azure Monitor for the HDInsight %q Cluster", clusterKind) + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient + if v, ok := d.GetOk("monitor"); ok { + monitorRaw := v.([]interface{}) + if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { + return err + } + } else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { + return nil + } + } + if d.HasChange("gateway") { log.Printf("[DEBUG] Updating the HDInsight %q Cluster gateway", clusterKind) vs := d.Get("gateway").([]interface{})[0].(map[string]interface{}) @@ -129,18 +141,7 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) Gateway to be updated: %s", name, resourceGroup, err) } } - if d.HasChange("monitor") { - log.Printf("[DEBUG] Chnage Azure Monitor for the HDInsight %q Cluster", clusterKind) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - if v, ok := d.GetOk("monitor"); ok { - monitorRaw := v.([]interface{}) - if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { - return err - } - } else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { - return nil - } - } + return readFunc(d, meta) } } From 
f5308cd9753370ecf2976cfc5f7b8da0f8691a2e Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Wed, 1 Jul 2020 14:18:45 -0700 Subject: [PATCH 38/48] Moved extensionsClient --- .../services/hdinsight/hdinsight_hadoop_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_hbase_cluster_resource.go | 5 ++--- .../hdinsight_interactive_query_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_kafka_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_spark_cluster_resource.go | 5 ++--- .../services/hdinsight/hdinsight_storm_cluster_resource.go | 5 ++--- 6 files changed, 12 insertions(+), 18 deletions(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 24b79c0cd13f..c449987aab92 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -174,6 +174,7 @@ func resourceArmHDInsightHadoopCluster() *schema.Resource { func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -297,7 +298,6 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -311,6 +311,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -397,8 +398,6 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index 7987ace6872b..42698e993e4a 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -127,6 +127,7 @@ func resourceArmHDInsightHBaseCluster() *schema.Resource { func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) 
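	// All HDInsight service clients (clusters, configurations, extensions)
	// are now resolved up front from the shared meta client, before the
	// timeout context is derived, rather than fetching the extensions
	// client mid-function at its first use further down.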
defer cancel() @@ -224,7 +225,6 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -238,6 +238,7 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -310,8 +311,6 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index 354b15bf39ef..229f1f592416 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -127,6 +127,7 @@ func resourceArmHDInsightInteractiveQueryCluster() *schema.Resource { func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -224,7 +225,6 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -238,6 +238,7 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -310,8 +311,6 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, 
resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index aa0c2ad2ea5d..90fc878452c2 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -128,6 +128,7 @@ func resourceArmHDInsightKafkaCluster() *schema.Resource { func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -225,7 +226,6 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -239,6 +239,7 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -311,8 +312,6 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index 32a57ac949fd..cadab6aefd33 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -127,6 +127,7 @@ func resourceArmHDInsightSparkCluster() *schema.Resource { func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -224,7 +225,6 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -238,6 +238,7 @@ func 
resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -310,8 +311,6 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index 3376c44f1903..a666dbd5a8d5 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -125,6 +125,7 @@ func resourceArmHDInsightStormCluster() *schema.Resource { func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -221,7 +222,6 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa d.SetId(*read.ID) // We can only enable monitoring after creation - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { @@ -235,6 +235,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface{}) error { clustersClient := meta.(*clients.Client).HDInsight.ClustersClient configurationsClient := meta.(*clients.Client).HDInsight.ConfigurationsClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) defer cancel() @@ -307,8 +308,6 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface sshEndpoint := azure.FindHDInsightConnectivityEndpoint("SSH", props.ConnectivityEndpoints) d.Set("ssh_endpoint", sshEndpoint) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient - monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) From 0b89eb5f60c9d8769f8082bc7adb98579058df96 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Wed, 1 Jul 2020 14:25:26 -0700 Subject: [PATCH 39/48] Return error if disabling monitor fails --- azurerm/internal/services/hdinsight/common_hdinsight.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index b6d9d4ee9142..eeb7bd05da87 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -115,7 +115,7 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema return err } } else if err := disableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name); err != nil { - return nil + return err } } From 7350c63651aa67296cdf0654bc697c08c628a653 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:08:06 -0700 Subject: [PATCH 40/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../hdinsight/hdinsight_interactive_query_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index 229f1f592416..e3cf1ca149bc 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -313,7 +313,7 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From de60a550b2f5102d52cd5bb29dddfe2f814a3614 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:08:34 -0700 Subject: [PATCH 41/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_storm_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index a666dbd5a8d5..400251c3fcc8 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -310,7 +310,7 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From ef9b7a0a0b09627859d385da782a6f3f6a2a44ed Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:08:47 -0700 Subject: [PATCH 42/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_spark_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go 
b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index cadab6aefd33..507d2bf69907 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -313,7 +313,7 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From f9f25c62075034b4dabd543e32cb05ff49fe3458 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:09:00 -0700 Subject: [PATCH 43/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_kafka_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index 90fc878452c2..d2b81a738057 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -314,7 +314,7 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From 033cb71cdb0d477b0f5232b30ae024e4757ebb4d Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Tue, 7 Jul 2020 15:09:14 -0700 Subject: [PATCH 44/48] Error messages Co-authored-by: Steve <11830746+jackofallops@users.noreply.github.com> --- .../services/hdinsight/hdinsight_hbase_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index 42698e993e4a..30ac10e86eef 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -313,7 +313,7 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) From 31c618816bf513c6adb3d6e22f721eaf06bbd978 Mon Sep 17 00:00:00 2001 From: jackofallops Date: Wed, 8 Jul 2020 15:22:46 +0100 Subject: [PATCH 45/48] post conflict rebase cleanup and catchup for hdinsight service --- 
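A minimal sketch of the update-time dispatch these patches converge on; the helper signatures are illustrative, not the provider's real API, and it assumes `context` plus the terraform-plugin-sdk `helper/schema` package:

	// updateMonitor routes a changed `monitor` block through Update. With
	// ForceNew dropped from the schema fields below, Terraform no longer
	// recreates the cluster when the block changes.
	func updateMonitor(ctx context.Context, d *schema.ResourceData,
		enable func(context.Context, []interface{}) error,
		disable func(context.Context) error) error {
		if !d.HasChange("monitor") {
			return nil
		}
		if v, ok := d.GetOk("monitor"); ok {
			// Block present in config: (re)enable with the new workspace/key.
			return enable(ctx, v.([]interface{}))
		}
		// Block removed: disable monitoring and propagate any failure,
		// matching the earlier `return nil` -> `return err` fix.
		return disable(ctx)
	}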
azurerm/helpers/azure/hdinsight.go | 2 -- .../hdinsight_hadoop_cluster_resource.go | 34 +++++++++---------- .../hdinsight_hbase_cluster_resource.go | 26 +++++++------- ...ight_interactive_query_cluster_resource.go | 26 +++++++------- .../hdinsight_kafka_cluster_resource.go | 26 +++++++------- .../hdinsight_spark_cluster_resource.go | 26 +++++++------- .../hdinsight_storm_cluster_resource.go | 26 +++++++------- 7 files changed, 82 insertions(+), 84 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index b0af66f993a5..67bdeef40411 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -191,12 +191,10 @@ func SchemaHDInsightsMonitor() *schema.Schema { "log_analytics_workspace_id": { Type: schema.TypeString, Required: true, - ForceNew: false, }, "primary_key": { Type: schema.TypeString, Required: true, - ForceNew: false, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, // Azure doesn't return the key diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index c449987aab92..57da2ee05e7f 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -202,7 +202,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { - return fmt.Errorf("Error expanding `storage_account`: %s", err) + return fmt.Errorf("failure expanding `storage_account`: %s", err) } rolesRaw := d.Get("roles").([]interface{}) @@ -213,14 +213,14 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } roles, err := expandHDInsightRoles(rolesRaw, hadoopRoles) if err != nil { - return fmt.Errorf("Error expanding `roles`: %+v", err) + return fmt.Errorf("failure expanding `roles`: %+v", err) } if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure checking for presence of existing HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } } @@ -253,20 +253,20 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { - return fmt.Errorf("Error creating HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure creating HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed waiting for creation of HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return 
fmt.Errorf("failure retrieving HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading ID for HDInsight Hadoop Cluster %q (Resource Group %q)", name, resourceGroup) + return fmt.Errorf("failure reading ID for HDInsight Hadoop Cluster %q (Resource Group %q)", name, resourceGroup) } d.SetId(*read.ID) @@ -293,7 +293,7 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) + return fmt.Errorf("failure waiting for HDInsight Cluster %q (Resource Group %q) to be running: %s", name, resourceGroup, err) } } @@ -331,18 +331,18 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac return nil } - return fmt.Errorf("Error retrieving HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } // Each call to configurationsClient methods is HTTP request. Getting all settings in one operation configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving Configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } gateway, exists := configurations.Configurations["gateway"] if !exists { - return fmt.Errorf("Error retrieving gateway for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving gateway for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("name", name) @@ -359,11 +359,11 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac if def := props.ClusterDefinition; def != nil { if err := d.Set("component_version", flattenHDInsightHadoopComponentVersion(def.ComponentVersion)); err != nil { - return fmt.Errorf("Error flattening `component_version`: %+v", err) + return fmt.Errorf("failure flattening `component_version`: %+v", err) } if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { - return fmt.Errorf("Error flattening `gateway`: %+v", err) + return fmt.Errorf("failure flattening `gateway`: %+v", err) } flattenHDInsightsMetastores(d, configurations.Configurations) @@ -381,7 +381,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac edgeNode, err := applicationsClient.Get(ctx, resourceGroup, name, name) if err != nil { if !utils.ResponseWasNotFound(edgeNode.Response) { - return fmt.Errorf("Error reading edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure reading edge node for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } } @@ -390,7 +390,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac } if err := d.Set("roles", flattenedRoles); err != nil { - return fmt.Errorf("Error flattening `roles`: %+v", err) + return fmt.Errorf("failure flattening `roles`: %+v", err) } httpEndpoint := azure.FindHDInsightConnectivityEndpoint("HTTPS", props.ConnectivityEndpoints) @@ -400,7 
+400,7 @@ func resourceArmHDInsightHadoopClusterRead(d *schema.ResourceData, meta interfac monitor, err := extensionsClient.GetMonitoringStatus(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error reading monitor configuation for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure reading monitor configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("monitor", flattenHDInsightMonitoring(monitor)) @@ -492,7 +492,7 @@ func hdInsightWaitForReadyRefreshFunc(ctx context.Context, client *hdinsight.Clu return func() (interface{}, string, error) { res, err := client.Get(ctx, resourceGroupName, name) if err != nil { - return nil, "Error", fmt.Errorf("Error issuing read request in hdInsightWaitForReadyRefreshFunc to Hadoop Cluster %q (Resource Group %q): %s", name, resourceGroupName, err) + return nil, "Error", fmt.Errorf("failure issuing read request in hdInsightWaitForReadyRefreshFunc to Hadoop Cluster %q (Resource Group %q): %s", name, resourceGroupName, err) } if props := res.Properties; props != nil { if state := props.ClusterState; state != nil { diff --git a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go index 30ac10e86eef..e5970e511a63 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hbase_cluster_resource.go @@ -155,7 +155,7 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { - return fmt.Errorf("Error expanding `storage_account`: %s", err) + return fmt.Errorf("failure expanding `storage_account`: %s", err) } hbaseRoles := hdInsightRoleDefinition{ @@ -166,14 +166,14 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa rolesRaw := d.Get("roles").([]interface{}) roles, err := expandHDInsightRoles(rolesRaw, hbaseRoles) if err != nil { - return fmt.Errorf("Error expanding `roles`: %+v", err) + return fmt.Errorf("failure expanding `roles`: %+v", err) } if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure checking for presence of existing HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } } @@ -206,20 +206,20 @@ func resourceArmHDInsightHBaseClusterCreate(d *schema.ResourceData, meta interfa } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { - return fmt.Errorf("Error creating HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure creating HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed waiting for creation of HDInsight HBase Cluster %q (Resource Group %q): %+v", name, 
resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading ID for HDInsight HBase Cluster %q (Resource Group %q)", name, resourceGroup) + return fmt.Errorf("failure reading ID for HDInsight HBase Cluster %q (Resource Group %q)", name, resourceGroup) } d.SetId(*read.ID) @@ -258,18 +258,18 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface return nil } - return fmt.Errorf("Error retrieving HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } // Each call to configurationsClient methods is HTTP request. Getting all settings in one operation configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Configuration for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving Configuration for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } gateway, exists := configurations.Configurations["gateway"] if !exists { - return fmt.Errorf("Error retrieving gateway for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving gateway for HDInsight HBase Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("name", name) @@ -286,11 +286,11 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface if def := props.ClusterDefinition; def != nil { if err := d.Set("component_version", flattenHDInsightHBaseComponentVersion(def.ComponentVersion)); err != nil { - return fmt.Errorf("Error flattening `component_version`: %+v", err) + return fmt.Errorf("failure flattening `component_version`: %+v", err) } if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { - return fmt.Errorf("Error flattening `gateway`: %+v", err) + return fmt.Errorf("failure flattening `gateway`: %+v", err) } flattenHDInsightsMetastores(d, configurations.Configurations) @@ -303,7 +303,7 @@ func resourceArmHDInsightHBaseClusterRead(d *schema.ResourceData, meta interface } flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, hbaseRoles) if err := d.Set("roles", flattenedRoles); err != nil { - return fmt.Errorf("Error flattening `roles`: %+v", err) + return fmt.Errorf("failure flattening `roles`: %+v", err) } httpEndpoint := azure.FindHDInsightConnectivityEndpoint("HTTPS", props.ConnectivityEndpoints) diff --git a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go index e3cf1ca149bc..57163d4a4fa1 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_interactive_query_cluster_resource.go @@ -155,7 +155,7 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) storageAccounts, identity, err := 
azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { - return fmt.Errorf("Error expanding `storage_account`: %s", err) + return fmt.Errorf("failure expanding `storage_account`: %s", err) } interactiveQueryRoles := hdInsightRoleDefinition{ @@ -166,14 +166,14 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m rolesRaw := d.Get("roles").([]interface{}) roles, err := expandHDInsightRoles(rolesRaw, interactiveQueryRoles) if err != nil { - return fmt.Errorf("Error expanding `roles`: %+v", err) + return fmt.Errorf("failure expanding `roles`: %+v", err) } if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing HDInsight InteractiveQuery Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure checking for presence of existing HDInsight InteractiveQuery Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } } @@ -206,20 +206,20 @@ func resourceArmHDInsightInteractiveQueryClusterCreate(d *schema.ResourceData, m } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { - return fmt.Errorf("Error creating HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure creating HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed waiting for creation of HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading ID for HDInsight Interactive Query Cluster %q (Resource Group %q)", name, resourceGroup) + return fmt.Errorf("failure reading ID for HDInsight Interactive Query Cluster %q (Resource Group %q)", name, resourceGroup) } d.SetId(*read.ID) @@ -258,18 +258,18 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met return nil } - return fmt.Errorf("Error retrieving HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Configuration for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving Configuration for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } gateway, exists := configurations.Configurations["gateway"] if !exists { - return fmt.Errorf("Error retrieving gateway for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving gateway for HDInsight Interactive Query Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("name", name) @@ -286,11 +286,11 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met if def := props.ClusterDefinition; def != nil { if err := d.Set("component_version", flattenHDInsightInteractiveQueryComponentVersion(def.ComponentVersion)); err != nil { - return fmt.Errorf("Error flattening `component_version`: %+v", err) + return fmt.Errorf("failure flattening `component_version`: %+v", err) } if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { - return fmt.Errorf("Error flattening `gateway`: %+v", err) + return fmt.Errorf("failure flattening `gateway`: %+v", err) } flattenHDInsightsMetastores(d, configurations.Configurations) @@ -303,7 +303,7 @@ func resourceArmHDInsightInteractiveQueryClusterRead(d *schema.ResourceData, met } flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, interactiveQueryRoles) if err := d.Set("roles", flattenedRoles); err != nil { - return fmt.Errorf("Error flattening `roles`: %+v", err) + return fmt.Errorf("failure flattening `roles`: %+v", err) } httpEndpoint := azure.FindHDInsightConnectivityEndpoint("HTTPS", props.ConnectivityEndpoints) diff --git a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go index d2b81a738057..ed5a954a2e77 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_kafka_cluster_resource.go @@ -156,7 +156,7 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { - return fmt.Errorf("Error expanding `storage_account`: %s", err) + return fmt.Errorf("failure expanding `storage_account`: %s", err) } kafkaRoles := hdInsightRoleDefinition{ @@ -167,14 +167,14 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa rolesRaw := d.Get("roles").([]interface{}) roles, err := expandHDInsightRoles(rolesRaw, kafkaRoles) if err != nil { - return fmt.Errorf("Error expanding `roles`: %+v", err) + return fmt.Errorf("failure expanding `roles`: %+v", err) } if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure checking for presence of existing HDInsight Kafka Cluster %q (Resource Group %q): 
%+v", name, resourceGroup, err) } } @@ -207,20 +207,20 @@ func resourceArmHDInsightKafkaClusterCreate(d *schema.ResourceData, meta interfa } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { - return fmt.Errorf("Error creating HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure creating HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed waiting for creation of HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading ID for HDInsight Kafka Cluster %q (Resource Group %q)", name, resourceGroup) + return fmt.Errorf("failure reading ID for HDInsight Kafka Cluster %q (Resource Group %q)", name, resourceGroup) } d.SetId(*read.ID) @@ -259,18 +259,18 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface return nil } - return fmt.Errorf("Error retrieving HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Kafka Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving Configuration for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } gateway, exists := configurations.Configurations["gateway"] if !exists { - return fmt.Errorf("Error retrieving gateway for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving gateway for HDInsight Hadoop Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("name", name) @@ -287,11 +287,11 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface if def := props.ClusterDefinition; def != nil { if err := d.Set("component_version", flattenHDInsightKafkaComponentVersion(def.ComponentVersion)); err != nil { - return fmt.Errorf("Error flattening `component_version`: %+v", err) + return fmt.Errorf("failure flattening `component_version`: %+v", err) } if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { - return fmt.Errorf("Error flattening `gateway`: %+v", err) + return fmt.Errorf("failure flattening `gateway`: %+v", err) } flattenHDInsightsMetastores(d, configurations.Configurations) @@ -304,7 +304,7 @@ func resourceArmHDInsightKafkaClusterRead(d *schema.ResourceData, meta interface } flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, kafkaRoles) if err := d.Set("roles", flattenedRoles); err != nil { - return fmt.Errorf("Error flattening `roles`: %+v", err) + return fmt.Errorf("failure flattening `roles`: %+v", err) } httpEndpoint := azure.FindHDInsightConnectivityEndpoint("HTTPS", props.ConnectivityEndpoints) diff --git a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go index 507d2bf69907..4aca44727903 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_spark_cluster_resource.go @@ -155,7 +155,7 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa storageAccountsGen2Raw := d.Get("storage_account_gen2").([]interface{}) storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, storageAccountsGen2Raw) if err != nil { - return fmt.Errorf("Error expanding `storage_account`: %s", err) + return fmt.Errorf("failure expanding `storage_account`: %s", err) } sparkRoles := hdInsightRoleDefinition{ @@ -166,14 +166,14 @@ func resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa rolesRaw := d.Get("roles").([]interface{}) roles, err := expandHDInsightRoles(rolesRaw, sparkRoles) if err != nil { - return fmt.Errorf("Error expanding `roles`: %+v", err) + return fmt.Errorf("failure expanding `roles`: %+v", err) } if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure checking for presence of existing HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } } @@ -206,20 +206,20 @@ func 
resourceArmHDInsightSparkClusterCreate(d *schema.ResourceData, meta interfa } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { - return fmt.Errorf("Error creating HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure creating HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed waiting for creation of HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading ID for HDInsight Spark Cluster %q (Resource Group %q)", name, resourceGroup) + return fmt.Errorf("failure reading ID for HDInsight Spark Cluster %q (Resource Group %q)", name, resourceGroup) } d.SetId(*read.ID) @@ -258,18 +258,18 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface return nil } - return fmt.Errorf("Error retrieving HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } // Each call to configurationsClient methods is HTTP request. Getting all settings in one operation configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Configuration for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving Configuration for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } gateway, exists := configurations.Configurations["gateway"] if !exists { - return fmt.Errorf("Error retrieving gateway for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving gateway for HDInsight Spark Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("name", name) @@ -286,11 +286,11 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface if def := props.ClusterDefinition; def != nil { if err := d.Set("component_version", flattenHDInsightSparkComponentVersion(def.ComponentVersion)); err != nil { - return fmt.Errorf("Error flattening `component_version`: %+v", err) + return fmt.Errorf("failure flattening `component_version`: %+v", err) } if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { - return fmt.Errorf("Error flattening `gateway`: %+v", err) + return fmt.Errorf("failure flattening `gateway`: %+v", err) } flattenHDInsightsMetastores(d, configurations.Configurations) @@ -303,7 +303,7 @@ func resourceArmHDInsightSparkClusterRead(d *schema.ResourceData, meta interface } flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, sparkRoles) if err := d.Set("roles", flattenedRoles); err != nil { - return fmt.Errorf("Error flattening `roles`: %+v", err) + return fmt.Errorf("failure flattening `roles`: %+v", err) } httpEndpoint := 
azure.FindHDInsightConnectivityEndpoint("HTTPS", props.ConnectivityEndpoints) diff --git a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go index 400251c3fcc8..d6b4cea5aeaa 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_storm_cluster_resource.go @@ -152,7 +152,7 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa storageAccountsRaw := d.Get("storage_account").([]interface{}) storageAccounts, identity, err := azure.ExpandHDInsightsStorageAccounts(storageAccountsRaw, nil) if err != nil { - return fmt.Errorf("Error expanding `storage_account`: %s", err) + return fmt.Errorf("failure expanding `storage_account`: %s", err) } stormRoles := hdInsightRoleDefinition{ @@ -163,14 +163,14 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa rolesRaw := d.Get("roles").([]interface{}) roles, err := expandHDInsightRoles(rolesRaw, stormRoles) if err != nil { - return fmt.Errorf("Error expanding `roles`: %+v", err) + return fmt.Errorf("failure expanding `roles`: %+v", err) } if features.ShouldResourcesBeImported() { existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure checking for presence of existing HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } } @@ -203,20 +203,20 @@ func resourceArmHDInsightStormClusterCreate(d *schema.ResourceData, meta interfa } future, err := client.Create(ctx, resourceGroup, name, params) if err != nil { - return fmt.Errorf("Error creating HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure creating HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if err := future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for creation of HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failed waiting for creation of HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading ID for HDInsight Storm Cluster %q (Resource Group %q)", name, resourceGroup) + return fmt.Errorf("failure reading ID for HDInsight Storm Cluster %q (Resource Group %q)", name, resourceGroup) } d.SetId(*read.ID) @@ -255,18 +255,18 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface return nil } - return fmt.Errorf("Error retrieving HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } // Each call to configurationsClient methods is HTTP request. 
Getting all settings in one operation configurations, err := configurationsClient.List(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Configuration for HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving Configuration for HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } gateway, exists := configurations.Configurations["gateway"] if !exists { - return fmt.Errorf("Error retrieving gateway for HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + return fmt.Errorf("failure retrieving gateway for HDInsight Storm Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } d.Set("name", name) @@ -283,11 +283,11 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface if def := props.ClusterDefinition; def != nil { if err := d.Set("component_version", flattenHDInsightStormComponentVersion(def.ComponentVersion)); err != nil { - return fmt.Errorf("Error flattening `component_version`: %+v", err) + return fmt.Errorf("failure flattening `component_version`: %+v", err) } if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil { - return fmt.Errorf("Error flattening `gateway`: %+v", err) + return fmt.Errorf("failure flattening `gateway`: %+v", err) } flattenHDInsightsMetastores(d, configurations.Configurations) @@ -300,7 +300,7 @@ func resourceArmHDInsightStormClusterRead(d *schema.ResourceData, meta interface } flattenedRoles := flattenHDInsightRoles(d, props.ComputeProfile, stormRoles) if err := d.Set("roles", flattenedRoles); err != nil { - return fmt.Errorf("Error flattening `roles`: %+v", err) + return fmt.Errorf("failure flattening `roles`: %+v", err) } httpEndpoint := azure.FindHDInsightConnectivityEndpoint("HTTPS", props.ConnectivityEndpoints) From 2abd3348518e826473c71a95dc707ae9641913c1 Mon Sep 17 00:00:00 2001 From: jackofallops Date: Wed, 8 Jul 2020 15:34:25 +0100 Subject: [PATCH 46/48] whitespace fixes --- website/docs/r/hdinsight_ml_services_cluster.html.markdown | 2 -- website/docs/r/hdinsight_rserver_cluster.html.markdown | 2 -- 2 files changed, 4 deletions(-) diff --git a/website/docs/r/hdinsight_ml_services_cluster.html.markdown b/website/docs/r/hdinsight_ml_services_cluster.html.markdown index dfb619b78f31..0769ac41a08e 100644 --- a/website/docs/r/hdinsight_ml_services_cluster.html.markdown +++ b/website/docs/r/hdinsight_ml_services_cluster.html.markdown @@ -245,8 +245,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the MLServices HDInsight Cluster. diff --git a/website/docs/r/hdinsight_rserver_cluster.html.markdown b/website/docs/r/hdinsight_rserver_cluster.html.markdown index e61f6d26ad24..caad16c7e9ad 100644 --- a/website/docs/r/hdinsight_rserver_cluster.html.markdown +++ b/website/docs/r/hdinsight_rserver_cluster.html.markdown @@ -245,8 +245,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the RServer HDInsight Cluster. 
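Across the Interactive Query, Kafka, Spark and Storm resources above, the read functions repeat the same configuration-fetch shape. A condensed sketch of that shared pattern follows (the cluster kind in each message varies per resource; this is illustrative only, not a proposed refactor). One pre-existing detail worth flagging for a follow-up: in the `!exists` branch the originals format `err` with `%+v`, but `err` is always nil on that path, so the sketch reports the missing key instead.

// Each call to the configurations client is a separate HTTP request,
// so retrieve every configuration for the cluster in a single List call.
configurations, err := configurationsClient.List(ctx, resourceGroup, name)
if err != nil {
	return fmt.Errorf("failure retrieving Configuration for HDInsight Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

// The gateway settings are needed to flatten the `gateway` block; err is
// nil on this path, so report the missing key rather than a nil error.
gateway, exists := configurations.Configurations["gateway"]
if !exists {
	return fmt.Errorf("failure retrieving gateway for HDInsight Cluster %q (Resource Group %q): configuration not found", name, resourceGroup)
}

if err := d.Set("gateway", azure.FlattenHDInsightsConfigurations(gateway)); err != nil {
	return fmt.Errorf("failure flattening `gateway`: %+v", err)
}
flattenHDInsightsMetastores(d, configurations.Configurations)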
From 19028564dbfafaca29651a297e4e2f7176643a6d Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Wed, 8 Jul 2020 08:12:28 -0700 Subject: [PATCH 47/48] Validation for log_analytics_workspace_id --- azurerm/helpers/azure/hdinsight.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/azurerm/helpers/azure/hdinsight.go b/azurerm/helpers/azure/hdinsight.go index 67bdeef40411..d89f5d3f6824 100644 --- a/azurerm/helpers/azure/hdinsight.go +++ b/azurerm/helpers/azure/hdinsight.go @@ -189,8 +189,9 @@ func SchemaHDInsightsMonitor() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "log_analytics_workspace_id": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.IsUUID, }, "primary_key": { Type: schema.TypeString, From 6745f26b0a34381fd59445880f324cd1e4dc7a71 Mon Sep 17 00:00:00 2001 From: Konstantin Kosinsky Date: Wed, 8 Jul 2020 09:58:51 -0700 Subject: [PATCH 48/48] Fix bad merge and move extensionsClient --- azurerm/internal/services/hdinsight/common_hdinsight.go | 2 +- .../hdinsight/hdinsight_hadoop_cluster_resource.go | 8 -------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/azurerm/internal/services/hdinsight/common_hdinsight.go b/azurerm/internal/services/hdinsight/common_hdinsight.go index b29d572b4dcb..aac757732d3e 100644 --- a/azurerm/internal/services/hdinsight/common_hdinsight.go +++ b/azurerm/internal/services/hdinsight/common_hdinsight.go @@ -19,6 +19,7 @@ import ( func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema.UpdateFunc { return func(d *schema.ResourceData, meta interface{}) error { client := meta.(*clients.Client).HDInsight.ClustersClient + extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -108,7 +109,6 @@ func hdinsightClusterUpdate(clusterKind string, readFunc schema.ReadFunc) schema if d.HasChange("monitor") { log.Printf("[DEBUG] Change Azure Monitor for the HDInsight %q Cluster", clusterKind) - extensionsClient := meta.(*clients.Client).HDInsight.ExtensionsClient if v, ok := d.GetOk("monitor"); ok { monitorRaw := v.([]interface{}) if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { diff --git a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go index 9f73be339619..57da2ee05e7f 100644 --- a/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go +++ b/azurerm/internal/services/hdinsight/hdinsight_hadoop_cluster_resource.go @@ -305,14 +305,6 @@ func resourceArmHDInsightHadoopClusterCreate(d *schema.ResourceData, meta interf } } - // We can only enable monitoring after creation - if v, ok := d.GetOk("monitor"); ok { - monitorRaw := v.([]interface{}) - if err := enableHDInsightMonitoring(ctx, extensionsClient, resourceGroup, name, monitorRaw); err != nil { - return err - } - } - return resourceArmHDInsightHadoopClusterRead(d, meta) }
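The `enableHDInsightMonitoring` helper called from the shared update path (and, before this last fixup, from the Hadoop create path) lives in common_hdinsight.go and is not visible in these hunks. Below is a minimal sketch of what it plausibly contains, built only from SDK calls used elsewhere in this series; the exact signature, import paths and error text are assumptions, not the merged implementation.

package hdinsight

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/hdinsight/mgmt/2018-06-01-preview/hdinsight"
	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
)

// enableHDInsightMonitoring enables the Azure Monitor (Log Analytics)
// integration on an existing cluster via the Extensions API.
// Sketch only: the real helper may differ.
func enableHDInsightMonitoring(ctx context.Context, client *hdinsight.ExtensionsClient, resourceGroup, name string, input []interface{}) error {
	v := input[0].(map[string]interface{})

	// Both values come straight from the `monitor` block: the workspace
	// GUID (validated as a UUID in PATCH 47) and its primary shared key.
	request := hdinsight.ClusterMonitoringRequest{
		WorkspaceID: utils.String(v["log_analytics_workspace_id"].(string)),
		PrimaryKey:  utils.String(v["primary_key"].(string)),
	}

	future, err := client.EnableMonitoring(ctx, resourceGroup, name, request)
	if err != nil {
		return err
	}

	// Enabling monitoring is a long-running operation; wait for it so a
	// failure surfaces during apply rather than silently afterwards.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return fmt.Errorf("failed waiting for enabling monitor for HDInsight Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
	}

	return nil
}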