From c9f18536ee723efbe697e5c09e7f8bab929e0108 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Dec 2018 11:52:58 +0000 Subject: [PATCH] r/kubernetes_cluster: support for rbac without aad Fixes #2345 --- azurerm/data_source_kubernetes_cluster.go | 41 ++++++---- .../data_source_kubernetes_cluster_test.go | 47 ++++++++++- azurerm/resource_arm_kubernetes_cluster.go | 82 +++++++++++-------- .../resource_arm_kubernetes_cluster_test.go | 82 ++++++++++++++++++- .../role-based-access-control-azuread/main.tf | 41 ++++++++++ .../outputs.tf | 23 ++++++ .../variables.tf | 16 ++++ .../role-based-access-control/main.tf | 9 +- .../docs/d/kubernetes_cluster.html.markdown | 2 + .../docs/r/kubernetes_cluster.html.markdown | 2 + 10 files changed, 278 insertions(+), 67 deletions(-) create mode 100644 examples/kubernetes/role-based-access-control-azuread/main.tf create mode 100644 examples/kubernetes/role-based-access-control-azuread/outputs.tf create mode 100644 examples/kubernetes/role-based-access-control-azuread/variables.tf diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index e9cacd8fe29d..aa4585ff21a2 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -241,6 +241,10 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, "azure_active_directory": { Type: schema.TypeList, Computed: true, @@ -341,7 +345,7 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error setting `network_profile`: %+v", err) } - roleBasedAccessControl := flattenKubernetesClusterDataSourceRoleBasedAccessControl(props.AadProfile) + roleBasedAccessControl := flattenKubernetesClusterDataSourceRoleBasedAccessControl(props) if err := d.Set("role_based_access_control", roleBasedAccessControl); err != nil { 
return fmt.Errorf("Error setting `role_based_access_control`: %+v", err) } @@ -363,30 +367,35 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} return nil } -func flattenKubernetesClusterDataSourceRoleBasedAccessControl(input *containerservice.ManagedClusterAADProfile) []interface{} { - if input == nil { - return []interface{}{} +func flattenKubernetesClusterDataSourceRoleBasedAccessControl(input *containerservice.ManagedClusterProperties) []interface{} { + rbacEnabled := false + if input.EnableRBAC != nil { + rbacEnabled = *input.EnableRBAC } - profile := make(map[string]interface{}) + results := make([]interface{}, 0) + if profile := input.AadProfile; profile != nil { + output := make(map[string]interface{}) - if input.ClientAppID != nil { - profile["client_app_id"] = *input.ClientAppID - } + if profile.ClientAppID != nil { + output["client_app_id"] = *profile.ClientAppID + } - if input.ServerAppID != nil { - profile["server_app_id"] = *input.ServerAppID - } + if profile.ServerAppID != nil { + output["server_app_id"] = *profile.ServerAppID + } - if input.TenantID != nil { - profile["tenant_id"] = *input.TenantID + if profile.TenantID != nil { + output["tenant_id"] = *profile.TenantID + } + + results = append(results, output) } return []interface{}{ map[string]interface{}{ - "azure_active_directory": []interface{}{ - profile, - }, + "enabled": rbacEnabled, + "azure_active_directory": results, }, } } diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index a217ebaa95ea..3dc2e79a6705 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -26,7 +26,8 @@ func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(dataSourceName), - resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "0"), + 
resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "false"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.client_key"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.client_certificate"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.cluster_ca_certificate"), @@ -40,6 +41,31 @@ func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { } func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + ), + }, + }, + }) +} + +func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { dataSourceName := "data.azurerm_kubernetes_cluster.test" ri := acctest.RandInt() clientId := os.Getenv("ARM_CLIENT_ID") @@ -53,10 +79,11 @@ func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(t *testing CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: 
testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret, tenantId), + Config: testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, tenantId), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "true"), resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.azure_active_directory.#", "1"), resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), @@ -273,8 +300,20 @@ data "azurerm_kubernetes_cluster" "test" { `, r) } -func testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret, tenantId string) string { - resource := testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt, location, clientId, clientSecret, tenantId) +func testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret string) string { + resource := testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt, location, clientId, clientSecret) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, resource) +} + +func testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt int, location, clientId, clientSecret, tenantId string) string { + resource := testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt, location, clientId, clientSecret, tenantId) return fmt.Sprintf(` %s diff --git 
a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 90c496d31447..438e7162c5a3 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -314,13 +314,19 @@ func resourceArmKubernetesCluster() *schema.Resource { "role_based_access_control": { Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, "azure_active_directory": { Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -450,8 +456,7 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter } rbacRaw := d.Get("role_based_access_control").([]interface{}) - azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) - roleBasedAccessControlEnabled := azureADProfile != nil + rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) parameters := containerservice.ManagedCluster{ Name: &name, @@ -461,7 +466,7 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter AddonProfiles: addonProfiles, AgentPoolProfiles: &agentProfiles, DNSPrefix: utils.String(dnsPrefix), - EnableRBAC: utils.Bool(roleBasedAccessControlEnabled), + EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), LinuxProfile: linuxProfile, NetworkProfile: networkProfile, @@ -552,7 +557,7 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `network_profile`: %+v", err) } - roleBasedAccessControl := flattenKubernetesClusterRoleBasedAccessControl(props.AadProfile, d) + roleBasedAccessControl := flattenKubernetesClusterRoleBasedAccessControl(props, d) if err := d.Set("role_based_access_control", roleBasedAccessControl); err != 
nil { return fmt.Errorf("Error setting `role_based_access_control`: %+v", err) } @@ -914,13 +919,15 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro return []interface{}{values} } -func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) *containerservice.ManagedClusterAADProfile { +func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) (bool, *containerservice.ManagedClusterAADProfile) { if len(input) == 0 { - return nil + return false, nil } val := input[0].(map[string]interface{}) + rbacEnabled := val["enabled"].(bool) + azureADsRaw := val["azure_active_directory"].([]interface{}) azureAdRaw := azureADsRaw[0].(map[string]interface{}) @@ -933,7 +940,7 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider tenantId = providerTenantId } - return &containerservice.ManagedClusterAADProfile{ + return rbacEnabled, &containerservice.ManagedClusterAADProfile{ ClientAppID: utils.String(clientAppId), ServerAppID: utils.String(serverAppId), ServerAppSecret: utils.String(serverAppSecret), @@ -941,46 +948,51 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider } } -func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.ManagedClusterAADProfile, d *schema.ResourceData) []interface{} { - if input == nil { - return []interface{}{} +func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.ManagedClusterProperties, d *schema.ResourceData) []interface{} { + rbacEnabled := false + if input.EnableRBAC != nil { + rbacEnabled = *input.EnableRBAC } - profile := make(map[string]interface{}) + results := make([]interface{}, 0) + if profile := input.AadProfile; profile != nil { + output := make(map[string]interface{}) - if input.ClientAppID != nil { - profile["client_app_id"] = *input.ClientAppID - } + if profile.ClientAppID != nil { + output["client_app_id"] = 
*profile.ClientAppID + } - if input.ServerAppID != nil { - profile["server_app_id"] = *input.ServerAppID - } + if profile.ServerAppID != nil { + output["server_app_id"] = *profile.ServerAppID + } - // since input.ServerAppSecret isn't returned we're pulling this out of the existing state (which won't work for Imports) - // role_based_access_control.0.azure_active_directory.0.server_app_secret - if existing, ok := d.GetOk("role_based_access_control"); ok { - rbacRawVals := existing.([]interface{}) - if len(rbacRawVals) > 0 { - rbacRawVal := rbacRawVals[0].(map[string]interface{}) - if azureADVals, ok := rbacRawVal["azure_active_directory"].([]interface{}); ok && len(azureADVals) > 0 { - azureADVal := azureADVals[0].(map[string]interface{}) - v := azureADVal["server_app_secret"] - if v != nil { - profile["server_app_secret"] = v.(string) + // since input.ServerAppSecret isn't returned we're pulling this out of the existing state (which won't work for Imports) + // role_based_access_control.0.azure_active_directory.0.server_app_secret + if existing, ok := d.GetOk("role_based_access_control"); ok { + rbacRawVals := existing.([]interface{}) + if len(rbacRawVals) > 0 { + rbacRawVal := rbacRawVals[0].(map[string]interface{}) + if azureADVals, ok := rbacRawVal["azure_active_directory"].([]interface{}); ok && len(azureADVals) > 0 { + azureADVal := azureADVals[0].(map[string]interface{}) + v := azureADVal["server_app_secret"] + if v != nil { + output["server_app_secret"] = v.(string) + } } } } - } - if input.TenantID != nil { - profile["tenant_id"] = *input.TenantID + if profile.TenantID != nil { + output["tenant_id"] = *profile.TenantID + } + + results = append(results, output) } return []interface{}{ map[string]interface{}{ - "azure_active_directory": []interface{}{ - profile, - }, + "enabled": rbacEnabled, + "azure_active_directory": results, }, } } diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 
72126d040cc3..db157506d567 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -85,7 +85,9 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "0"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), @@ -110,6 +112,36 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { location := testLocation() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") tenantId := os.Getenv("ARM_TENANT_ID") resource.Test(t, resource.TestCase{ @@ -118,10 +150,11 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret, ""), + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, ""), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), @@ -137,7 +170,7 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { }, { // should be no changes since the default for Tenant ID comes from the Provider block - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret, tenantId), + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, tenantId), PlanOnly: true, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), @@ -559,7 +592,46 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, clientId, 
clientSecret) } -func testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret, tenantId string) string { +func testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + role_based_access_control { + enabled = true + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt int, location, clientId, clientSecret, tenantId string) string { return fmt.Sprintf(` variable "tenant_id" { default = "%s" @@ -596,6 +668,8 @@ resource "azurerm_kubernetes_cluster" "test" { } role_based_access_control { + enabled = true + azure_active_directory { server_app_id = "%s" server_app_secret = "%s" diff --git a/examples/kubernetes/role-based-access-control-azuread/main.tf b/examples/kubernetes/role-based-access-control-azuread/main.tf new file mode 100644 index 000000000000..4c4de2ab83fd --- /dev/null +++ 
b/examples/kubernetes/role-based-access-control-azuread/main.tf @@ -0,0 +1,41 @@ +resource "azurerm_resource_group" "test" { + name = "${var.prefix}-rbac-resources" + location = "${var.location}" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "${var.prefix}-rbac" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "${var.prefix}-rbac" + + agent_pool_profile { + name = "default" + count = 1 + vm_size = "Standard_D1_v2" + os_type = "Linux" + os_disk_size_gb = 30 + } + + service_principal { + client_id = "${var.kubernetes_client_id}" + client_secret = "${var.kubernetes_client_secret}" + } + + role_based_access_control { + enabled = true + + azure_active_directory { + # NOTE: in a Production environment these should be different values + # but for the purposes of this example, this should be sufficient + client_app_id = "${var.kubernetes_client_id}" + + server_app_id = "${var.kubernetes_client_id}" + server_app_secret = "${var.kubernetes_client_secret}" + } + } + + tags { + Environment = "Production" + } +} diff --git a/examples/kubernetes/role-based-access-control-azuread/outputs.tf b/examples/kubernetes/role-based-access-control-azuread/outputs.tf new file mode 100644 index 000000000000..a3df678428c8 --- /dev/null +++ b/examples/kubernetes/role-based-access-control-azuread/outputs.tf @@ -0,0 +1,23 @@ +output "id" { + value = "${azurerm_kubernetes_cluster.test.id}" +} + +output "kube_config" { + value = "${azurerm_kubernetes_cluster.test.kube_config_raw}" +} + +output "client_key" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.client_key}" +} + +output "client_certificate" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.client_certificate}" +} + +output "cluster_ca_certificate" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.cluster_ca_certificate}" +} + +output "host" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.host}" 
+} diff --git a/examples/kubernetes/role-based-access-control-azuread/variables.tf b/examples/kubernetes/role-based-access-control-azuread/variables.tf new file mode 100644 index 000000000000..7de68ae5473f --- /dev/null +++ b/examples/kubernetes/role-based-access-control-azuread/variables.tf @@ -0,0 +1,16 @@ +variable "prefix" { + description = "A prefix used for all resources in this example" +} + +variable "location" { + default = "West Europe" + description = "The Azure Region in which all resources in this example should be provisioned" +} + +variable "kubernetes_client_id" { + description = "The Client ID for the Service Principal to use for this Managed Kubernetes Cluster" +} + +variable "kubernetes_client_secret" { + description = "The Client Secret for the Service Principal to use for this Managed Kubernetes Cluster" +} diff --git a/examples/kubernetes/role-based-access-control/main.tf b/examples/kubernetes/role-based-access-control/main.tf index ca7128f6373c..66380dc43112 100644 --- a/examples/kubernetes/role-based-access-control/main.tf +++ b/examples/kubernetes/role-based-access-control/main.tf @@ -23,14 +23,7 @@ resource "azurerm_kubernetes_cluster" "test" { } role_based_access_control { - azure_active_directory { - # NOTE: in a Production environment these should be different values - # but for the purposes of this example, this should be sufficient - client_app_id = "${var.kubernetes_client_id}" - - server_app_id = "${var.kubernetes_client_id}" - server_app_secret = "${var.kubernetes_client_secret}" - } + enabled = true } tags { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index d2bbcb2ac70c..74bf12daddb9 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -173,6 +173,8 @@ A `role_based_access_control` block exports the following: * `azure_active_directory` - A `azure_active_directory` block as documented above. 
+* `enabled` - Is Role Based Access Control enabled? + + --- A `service_principal` block supports the following: diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 20f69d162095..80628d3a7f34 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -169,6 +169,8 @@ A `role_based_access_control` block supports the following: * `azure_active_directory` - (Required) An `azure_active_directory` block. Changing this forces a new resource to be created. +* `enabled` - (Required) Is Role Based Access Control enabled? Changing this forces a new resource to be created. + + --- A `service_principal` block supports the following: