From c9f18536ee723efbe697e5c09e7f8bab929e0108 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Dec 2018 11:52:58 +0000 Subject: [PATCH 1/4] r/kubernetes_cluster: support for rbac without aad Fixes #2345 --- azurerm/data_source_kubernetes_cluster.go | 41 ++++++---- .../data_source_kubernetes_cluster_test.go | 47 ++++++++++- azurerm/resource_arm_kubernetes_cluster.go | 82 +++++++++++-------- .../resource_arm_kubernetes_cluster_test.go | 82 ++++++++++++++++++- .../role-based-access-control-azuread/main.tf | 41 ++++++++++ .../outputs.tf | 23 ++++++ .../variables.tf | 16 ++++ .../role-based-access-control/main.tf | 9 +- .../docs/d/kubernetes_cluster.html.markdown | 2 + .../docs/r/kubernetes_cluster.html.markdown | 2 + 10 files changed, 278 insertions(+), 67 deletions(-) create mode 100644 examples/kubernetes/role-based-access-control-azuread/main.tf create mode 100644 examples/kubernetes/role-based-access-control-azuread/outputs.tf create mode 100644 examples/kubernetes/role-based-access-control-azuread/variables.tf diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index e9cacd8fe29d..aa4585ff21a2 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -241,6 +241,10 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, "azure_active_directory": { Type: schema.TypeList, Computed: true, @@ -341,7 +345,7 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error setting `network_profile`: %+v", err) } - roleBasedAccessControl := flattenKubernetesClusterDataSourceRoleBasedAccessControl(props.AadProfile) + roleBasedAccessControl := flattenKubernetesClusterDataSourceRoleBasedAccessControl(props) if err := d.Set("role_based_access_control", roleBasedAccessControl); err != nil { 
return fmt.Errorf("Error setting `role_based_access_control`: %+v", err) } @@ -363,30 +367,35 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} return nil } -func flattenKubernetesClusterDataSourceRoleBasedAccessControl(input *containerservice.ManagedClusterAADProfile) []interface{} { - if input == nil { - return []interface{}{} +func flattenKubernetesClusterDataSourceRoleBasedAccessControl(input *containerservice.ManagedClusterProperties) []interface{} { + rbacEnabled := false + if input.EnableRBAC != nil { + rbacEnabled = *input.EnableRBAC } - profile := make(map[string]interface{}) + results := make([]interface{}, 0) + if profile := input.AadProfile; profile != nil { + output := make(map[string]interface{}) - if input.ClientAppID != nil { - profile["client_app_id"] = *input.ClientAppID - } + if profile.ClientAppID != nil { + output["client_app_id"] = *profile.ClientAppID + } - if input.ServerAppID != nil { - profile["server_app_id"] = *input.ServerAppID - } + if profile.ServerAppID != nil { + output["server_app_id"] = *profile.ServerAppID + } - if input.TenantID != nil { - profile["tenant_id"] = *input.TenantID + if profile.TenantID != nil { + output["tenant_id"] = *profile.TenantID + } + + results = append(results, output) } return []interface{}{ map[string]interface{}{ - "azure_active_directory": []interface{}{ - profile, - }, + "enabled": rbacEnabled, + "azure_active_directory": results, }, } } diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index a217ebaa95ea..3dc2e79a6705 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -26,7 +26,8 @@ func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(dataSourceName), - resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "0"), + 
resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "false"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.client_key"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.client_certificate"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.cluster_ca_certificate"), @@ -40,6 +41,31 @@ func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { } func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { + dataSourceName := "data.azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(dataSourceName), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + ), + }, + }, + }) +} + +func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { dataSourceName := "data.azurerm_kubernetes_cluster.test" ri := acctest.RandInt() clientId := os.Getenv("ARM_CLIENT_ID") @@ -53,10 +79,11 @@ func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(t *testing CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: 
testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret, tenantId), + Config: testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, tenantId), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "true"), resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.azure_active_directory.#", "1"), resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), @@ -273,8 +300,20 @@ data "azurerm_kubernetes_cluster" "test" { `, r) } -func testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret, tenantId string) string { - resource := testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt, location, clientId, clientSecret, tenantId) +func testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret string) string { + resource := testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt, location, clientId, clientSecret) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster" "test" { + name = "${azurerm_kubernetes_cluster.test.name}" + resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}" +} +`, resource) +} + +func testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt int, location, clientId, clientSecret, tenantId string) string { + resource := testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt, location, clientId, clientSecret, tenantId) return fmt.Sprintf(` %s diff --git 
a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 90c496d31447..438e7162c5a3 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -314,13 +314,19 @@ func resourceArmKubernetesCluster() *schema.Resource { "role_based_access_control": { Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + }, "azure_active_directory": { Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -450,8 +456,7 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter } rbacRaw := d.Get("role_based_access_control").([]interface{}) - azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) - roleBasedAccessControlEnabled := azureADProfile != nil + rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) parameters := containerservice.ManagedCluster{ Name: &name, @@ -461,7 +466,7 @@ func resourceArmKubernetesClusterCreateUpdate(d *schema.ResourceData, meta inter AddonProfiles: addonProfiles, AgentPoolProfiles: &agentProfiles, DNSPrefix: utils.String(dnsPrefix), - EnableRBAC: utils.Bool(roleBasedAccessControlEnabled), + EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), LinuxProfile: linuxProfile, NetworkProfile: networkProfile, @@ -552,7 +557,7 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error setting `network_profile`: %+v", err) } - roleBasedAccessControl := flattenKubernetesClusterRoleBasedAccessControl(props.AadProfile, d) + roleBasedAccessControl := flattenKubernetesClusterRoleBasedAccessControl(props, d) if err := d.Set("role_based_access_control", roleBasedAccessControl); err != 
nil { return fmt.Errorf("Error setting `role_based_access_control`: %+v", err) } @@ -914,13 +919,15 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro return []interface{}{values} } -func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) *containerservice.ManagedClusterAADProfile { +func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) (bool, *containerservice.ManagedClusterAADProfile) { if len(input) == 0 { - return nil + return false, nil } val := input[0].(map[string]interface{}) + rbacEnabled := val["enabled"].(bool) + azureADsRaw := val["azure_active_directory"].([]interface{}) azureAdRaw := azureADsRaw[0].(map[string]interface{}) @@ -933,7 +940,7 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider tenantId = providerTenantId } - return &containerservice.ManagedClusterAADProfile{ + return rbacEnabled, &containerservice.ManagedClusterAADProfile{ ClientAppID: utils.String(clientAppId), ServerAppID: utils.String(serverAppId), ServerAppSecret: utils.String(serverAppSecret), @@ -941,46 +948,51 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider } } -func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.ManagedClusterAADProfile, d *schema.ResourceData) []interface{} { - if input == nil { - return []interface{}{} +func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.ManagedClusterProperties, d *schema.ResourceData) []interface{} { + rbacEnabled := false + if input.EnableRBAC != nil { + rbacEnabled = *input.EnableRBAC } - profile := make(map[string]interface{}) + results := make([]interface{}, 0) + if profile := input.AadProfile; profile != nil { + output := make(map[string]interface{}) - if input.ClientAppID != nil { - profile["client_app_id"] = *input.ClientAppID - } + if profile.ClientAppID != nil { + output["client_app_id"] = 
*profile.ClientAppID + } - if input.ServerAppID != nil { - profile["server_app_id"] = *input.ServerAppID - } + if profile.ServerAppID != nil { + output["server_app_id"] = *profile.ServerAppID + } - // since input.ServerAppSecret isn't returned we're pulling this out of the existing state (which won't work for Imports) - // role_based_access_control.0.azure_active_directory.0.server_app_secret - if existing, ok := d.GetOk("role_based_access_control"); ok { - rbacRawVals := existing.([]interface{}) - if len(rbacRawVals) > 0 { - rbacRawVal := rbacRawVals[0].(map[string]interface{}) - if azureADVals, ok := rbacRawVal["azure_active_directory"].([]interface{}); ok && len(azureADVals) > 0 { - azureADVal := azureADVals[0].(map[string]interface{}) - v := azureADVal["server_app_secret"] - if v != nil { - profile["server_app_secret"] = v.(string) + // since input.ServerAppSecret isn't returned we're pulling this out of the existing state (which won't work for Imports) + // role_based_access_control.0.azure_active_directory.0.server_app_secret + if existing, ok := d.GetOk("role_based_access_control"); ok { + rbacRawVals := existing.([]interface{}) + if len(rbacRawVals) > 0 { + rbacRawVal := rbacRawVals[0].(map[string]interface{}) + if azureADVals, ok := rbacRawVal["azure_active_directory"].([]interface{}); ok && len(azureADVals) > 0 { + azureADVal := azureADVals[0].(map[string]interface{}) + v := azureADVal["server_app_secret"] + if v != nil { + output["server_app_secret"] = v.(string) + } } } } - } - if input.TenantID != nil { - profile["tenant_id"] = *input.TenantID + if profile.TenantID != nil { + output["tenant_id"] = *profile.TenantID + } + + results = append(results, output) } return []interface{}{ map[string]interface{}{ - "azure_active_directory": []interface{}{ - profile, - }, + "enabled": rbacEnabled, + "azure_active_directory": results, }, } } diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 
72126d040cc3..db157506d567 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -85,7 +85,9 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "0"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_key"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.client_certificate"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.cluster_ca_certificate"), @@ -110,6 +112,36 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { location := testLocation() clientId := os.Getenv("ARM_CLIENT_ID") clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { + resourceName := "azurerm_kubernetes_cluster.test" + ri := acctest.RandInt() + location := testLocation() + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") tenantId := os.Getenv("ARM_TENANT_ID") resource.Test(t, resource.TestCase{ @@ -118,10 +150,11 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret, ""), + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, ""), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), @@ -137,7 +170,7 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { }, { // should be no changes since the default for Tenant ID comes from the Provider block - Config: testAccAzureRMKubernetesCluster_roleBasedAccessControl(ri, location, clientId, clientSecret, tenantId), + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(ri, location, clientId, clientSecret, tenantId), PlanOnly: true, Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), @@ -559,7 +592,46 @@ resource "azurerm_kubernetes_cluster" "test" { `, rInt, location, rInt, rInt, clientId, 
clientSecret) } -func testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret, tenantId string) string { +func testAccAzureRMKubernetesCluster_roleBasedAccessControl(rInt int, location, clientId, clientSecret string) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + agent_pool_profile { + name = "default" + count = "1" + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + role_based_access_control { + enabled = true + } +} +`, rInt, location, rInt, rInt, rInt, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(rInt int, location, clientId, clientSecret, tenantId string) string { return fmt.Sprintf(` variable "tenant_id" { default = "%s" @@ -596,6 +668,8 @@ resource "azurerm_kubernetes_cluster" "test" { } role_based_access_control { + enabled = true + azure_active_directory { server_app_id = "%s" server_app_secret = "%s" diff --git a/examples/kubernetes/role-based-access-control-azuread/main.tf b/examples/kubernetes/role-based-access-control-azuread/main.tf new file mode 100644 index 000000000000..4c4de2ab83fd --- /dev/null +++ 
b/examples/kubernetes/role-based-access-control-azuread/main.tf @@ -0,0 +1,41 @@ +resource "azurerm_resource_group" "test" { + name = "${var.prefix}-rbac-resources" + location = "${var.location}" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "${var.prefix}-rbac" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "${var.prefix}-rbac" + + agent_pool_profile { + name = "default" + count = 1 + vm_size = "Standard_D1_v2" + os_type = "Linux" + os_disk_size_gb = 30 + } + + service_principal { + client_id = "${var.kubernetes_client_id}" + client_secret = "${var.kubernetes_client_secret}" + } + + role_based_access_control { + enabled = true + + azure_active_directory { + # NOTE: in a Production environment these should be different values + # but for the purposes of this example, this should be sufficient + client_app_id = "${var.kubernetes_client_id}" + + server_app_id = "${var.kubernetes_client_id}" + server_app_secret = "${var.kubernetes_client_secret}" + } + } + + tags { + Environment = "Production" + } +} diff --git a/examples/kubernetes/role-based-access-control-azuread/outputs.tf b/examples/kubernetes/role-based-access-control-azuread/outputs.tf new file mode 100644 index 000000000000..a3df678428c8 --- /dev/null +++ b/examples/kubernetes/role-based-access-control-azuread/outputs.tf @@ -0,0 +1,23 @@ +output "id" { + value = "${azurerm_kubernetes_cluster.test.id}" +} + +output "kube_config" { + value = "${azurerm_kubernetes_cluster.test.kube_config_raw}" +} + +output "client_key" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.client_key}" +} + +output "client_certificate" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.client_certificate}" +} + +output "cluster_ca_certificate" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.cluster_ca_certificate}" +} + +output "host" { + value = "${azurerm_kubernetes_cluster.test.kube_config.0.host}" 
+} diff --git a/examples/kubernetes/role-based-access-control-azuread/variables.tf b/examples/kubernetes/role-based-access-control-azuread/variables.tf new file mode 100644 index 000000000000..7de68ae5473f --- /dev/null +++ b/examples/kubernetes/role-based-access-control-azuread/variables.tf @@ -0,0 +1,16 @@ +variable "prefix" { + description = "A prefix used for all resources in this example" +} + +variable "location" { + default = "West Europe" + description = "The Azure Region in which all resources in this example should be provisioned" +} + +variable "kubernetes_client_id" { + description = "The Client ID for the Service Principal to use for this Managed Kubernetes Cluster" +} + +variable "kubernetes_client_secret" { + description = "The Client Secret for the Service Principal to use for this Managed Kubernetes Cluster" +} diff --git a/examples/kubernetes/role-based-access-control/main.tf b/examples/kubernetes/role-based-access-control/main.tf index ca7128f6373c..66380dc43112 100644 --- a/examples/kubernetes/role-based-access-control/main.tf +++ b/examples/kubernetes/role-based-access-control/main.tf @@ -23,14 +23,7 @@ resource "azurerm_kubernetes_cluster" "test" { } role_based_access_control { - azure_active_directory { - # NOTE: in a Production environment these should be different values - # but for the purposes of this example, this should be sufficient - client_app_id = "${var.kubernetes_client_id}" - - server_app_id = "${var.kubernetes_client_id}" - server_app_secret = "${var.kubernetes_client_secret}" - } + enabled = true } tags { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index d2bbcb2ac70c..74bf12daddb9 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -173,6 +173,8 @@ A `role_based_access_control` block exports the following: * `azure_active_directory` - A `azure_active_directory` block as documented above. 
+* `enabled` - Is Role Based Access Control enabled? + --- A `service_principal` block supports the following: diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 20f69d162095..80628d3a7f34 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -169,6 +169,8 @@ A `role_based_access_control` block supports the following: * `azure_active_directory` - (Required) An `azure_active_directory` block. Changing this forces a new resource to be created. +* `enabled` - (Required) Is Role Based Access Control Enabled? Changing this forces a new resource to be created. + --- A `service_principal` block supports the following: From 6b1b1c8ee4d386faaed480c5364154f677f544b4 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Dec 2018 13:07:29 +0000 Subject: [PATCH 2/4] r/kubernetes_cluster: exporting the adminProfile when available Fixes #2421 --- azurerm/data_source_kubernetes_cluster.go | 67 +++++++++++++++++-- .../data_source_kubernetes_cluster_test.go | 6 ++ azurerm/resource_arm_kubernetes_cluster.go | 59 ++++++++++++++++ .../resource_arm_kubernetes_cluster_test.go | 6 ++ .../docs/d/kubernetes_cluster.html.markdown | 8 ++- .../docs/r/kubernetes_cluster.html.markdown | 10 ++- 6 files changed, 146 insertions(+), 10 deletions(-) diff --git a/azurerm/data_source_kubernetes_cluster.go b/azurerm/data_source_kubernetes_cluster.go index aa4585ff21a2..37fc2193f0ab 100644 --- a/azurerm/data_source_kubernetes_cluster.go +++ b/azurerm/data_source_kubernetes_cluster.go @@ -131,6 +131,47 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, }, + "kube_admin_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: 
schema.TypeString, + Computed: true, + Sensitive: true, + }, + "client_certificate": { + Type: schema.TypeString, + Computed: true, + }, + "client_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "cluster_ca_certificate": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "kube_admin_config_raw": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "kube_config": { Type: schema.TypeList, Computed: true, @@ -290,14 +331,13 @@ func dataSourceArmKubernetesCluster() *schema.Resource { } func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) error { - kubernetesClustersClient := meta.(*ArmClient).kubernetesClustersClient - client := meta.(*ArmClient) + client := meta.(*ArmClient).kubernetesClustersClient + ctx := meta.(*ArmClient).StopContext name := d.Get("name").(string) resourceGroup := d.Get("resource_group_name").(string) - ctx := client.StopContext - resp, err := kubernetesClustersClient.Get(ctx, resourceGroup, name) + resp, err := client.Get(ctx, resourceGroup, name) if err != nil { if utils.ResponseWasNotFound(resp.Response) { return fmt.Errorf("Error: Managed Kubernetes Cluster %q was not found in Resource Group %q", name, resourceGroup) @@ -306,7 +346,7 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } - profile, err := kubernetesClustersClient.GetAccessProfile(ctx, resourceGroup, name, "clusterUser") + profile, err := client.GetAccessProfile(ctx, resourceGroup, name, "clusterUser") if err != nil { return fmt.Errorf("Error retrieving Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) } @@ -354,6 +394,23 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} if err := d.Set("service_principal", servicePrincipal); err != nil { return 
fmt.Errorf("Error setting `service_principal`: %+v", err) } + + // adminProfile is only available for RBAC enabled clusters with AAD + if props.AadProfile != nil { + adminProfile, err := client.GetAccessProfile(ctx, resourceGroup, name, "clusterAdmin") + if err != nil { + return fmt.Errorf("Error retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err) + } + + adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(adminProfile) + d.Set("kube_admin_config_raw", adminKubeConfigRaw) + if err := d.Set("kube_admin_config", adminKubeConfig); err != nil { + return fmt.Errorf("Error setting `kube_admin_config`: %+v", err) + } + } else { + d.Set("kube_admin_config_raw", "") + d.Set("kube_admin_config", []interface{}{}) + } } kubeConfigRaw, kubeConfig := flattenKubernetesClusterDataSourceAccessProfile(profile) diff --git a/azurerm/data_source_kubernetes_cluster_test.go b/azurerm/data_source_kubernetes_cluster_test.go index 3dc2e79a6705..aa4a9ff8beea 100644 --- a/azurerm/data_source_kubernetes_cluster_test.go +++ b/azurerm/data_source_kubernetes_cluster_test.go @@ -34,6 +34,8 @@ func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.host"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.username"), resource.TestCheckResourceAttrSet(dataSourceName, "kube_config.0.password"), + resource.TestCheckResourceAttr(dataSourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(dataSourceName, "kube_admin_config_raw", ""), ), }, }, @@ -59,6 +61,8 @@ func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl(t *testing resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "role_based_access_control.0.enabled", "true"), resource.TestCheckResourceAttr(dataSourceName, 
"role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(dataSourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(dataSourceName, "kube_admin_config_raw", ""), ), }, }, @@ -88,6 +92,8 @@ func TestAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *test resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.client_app_id"), resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), resource.TestCheckResourceAttrSet(dataSourceName, "role_based_access_control.0.azure_active_directory.0.tenant_id"), + resource.TestCheckResourceAttr(dataSourceName, "kube_admin_config.#", "1"), + resource.TestCheckResourceAttrSet(dataSourceName, "kube_admin_config_raw"), ), }, }, diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 438e7162c5a3..4df32ca590f7 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -372,6 +372,48 @@ func resourceArmKubernetesCluster() *schema.Resource { }, // Computed + "kube_admin_config": { + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "client_certificate": { + Type: schema.TypeString, + Computed: true, + }, + "client_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "cluster_ca_certificate": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "kube_admin_config_raw": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "kube_config": { Type: schema.TypeList, Computed: true, @@ -566,6 +608,23 @@ func 
resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) if err := d.Set("service_principal", servicePrincipal); err != nil { return fmt.Errorf("Error setting `service_principal`: %+v", err) } + + // adminProfile is only available for RBAC enabled clusters with AAD + if props.AadProfile != nil { + adminProfile, err := client.GetAccessProfile(ctx, resGroup, name, "clusterAdmin") + if err != nil { + return fmt.Errorf("Error retrieving Admin Access Profile for Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) + } + + adminKubeConfigRaw, adminKubeConfig := flattenKubernetesClusterAccessProfile(adminProfile) + d.Set("kube_admin_config_raw", adminKubeConfigRaw) + if err := d.Set("kube_admin_config", adminKubeConfig); err != nil { + return fmt.Errorf("Error setting `kube_admin_config`: %+v", err) + } + } else { + d.Set("kube_admin_config_raw", "") + d.Set("kube_admin_config", []interface{}{}) + } } kubeConfigRaw, kubeConfig := flattenKubernetesClusterAccessProfile(profile) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index db157506d567..47d5517d0b67 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -94,6 +94,8 @@ func TestAccAzureRMKubernetesCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.host"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.username"), resource.TestCheckResourceAttrSet(resourceName, "kube_config.0.password"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), resource.TestCheckResourceAttrSet(resourceName, "agent_pool_profile.0.max_pods"), ), }, @@ -125,6 +127,8 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControl(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "role_based_access_control.#", 
"1"), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "role_based_access_control.0.azure_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config_raw", ""), ), }, { @@ -160,6 +164,8 @@ func TestAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_id"), resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.server_app_secret"), resource.TestCheckResourceAttrSet(resourceName, "role_based_access_control.0.azure_active_directory.0.tenant_id"), + resource.TestCheckResourceAttr(resourceName, "kube_admin_config.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "kube_admin_config_raw"), ), }, { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 74bf12daddb9..8a7e81ec9a9d 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -44,6 +44,10 @@ The following attributes are exported: * `fqdn` - The FQDN of the Azure Kubernetes Managed Cluster. +* `kube_admin_config` - A `kube_admin_config` block as defined below. This is only available when Role Based Access Control with Azure Active Directory is enabled. + +* `kube_admin_config_raw` - Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled. + * `kube_config` - A `kube_config` block as defined below. * `kube_config_raw` - Base64 encoded Kubernetes configuration. 
@@ -90,7 +94,6 @@ A `agent_pool_profile` block exports the following:
 
 * `vnet_subnet_id` - The ID of the Subnet where the Agents in the Pool are provisioned.
 
-
 ---
 
 A `azure_active_directory` block exports the following:
@@ -100,6 +103,7 @@ A `azure_active_directory` block exports the following:
 
 * `server_app_id` - The Server ID of an Azure Active Directory Application.
 
 * `tenant_id` - The Tenant ID used for Azure Active Directory Application.
+
 ---
 
 A `http_application_routing` block exports the following:
@@ -110,7 +114,7 @@ A `http_application_routing` block exports the following:
 
 ---
 
-A `kube_config` block exports the following:
+The `kube_admin_config` and `kube_config` blocks export the following:
 
 * `client_key` - Base64 encoded private key used by clients to authenticate to the Kubernetes cluster.
 
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index 80628d3a7f34..abf36dc6abb0 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -194,12 +194,16 @@ The following attributes are exported:
 
 * `fqdn` - The FQDN of the Azure Kubernetes Managed Cluster.
 
-* `kube_config_raw` - Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools
+* `kube_admin_config` - A `kube_admin_config` block as defined below. This is only available when Role Based Access Control with Azure Active Directory is enabled.
 
-* `http_application_routing` - A `http_application_routing` block as defined below.
+* `kube_admin_config_raw` - Raw Kubernetes config for the admin account to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools. This is only available when Role Based Access Control with Azure Active Directory is enabled.
 
 * `kube_config` - A `kube_config` block as defined below.
 
+* `kube_config_raw` - Raw Kubernetes config to be used by [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) and other compatible tools + +* `http_application_routing` - A `http_application_routing` block as defined below. + * `node_resource_group` - The auto-generated Resource Group which contains the resources for this Managed Kubernetes Cluster. --- @@ -210,7 +214,7 @@ A `http_application_routing` block exports the following: --- -A `kube_config` exports the following:: +The `kube_admin_config` and `kube_config` blocks export the following:: * `client_key` - Base64 encoded private key used by clients to authenticate to the Kubernetes cluster. From aa8b1f56bfb8c1494a46764279aab061d0acfa61 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Dec 2018 13:26:09 +0000 Subject: [PATCH 3/4] r/kubernetes_cluster: fixing a null-ref --- azurerm/resource_arm_kubernetes_cluster.go | 33 +++++++++++++--------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster.go b/azurerm/resource_arm_kubernetes_cluster.go index 4df32ca590f7..0e413785df41 100644 --- a/azurerm/resource_arm_kubernetes_cluster.go +++ b/azurerm/resource_arm_kubernetes_cluster.go @@ -986,25 +986,30 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider val := input[0].(map[string]interface{}) rbacEnabled := val["enabled"].(bool) - azureADsRaw := val["azure_active_directory"].([]interface{}) - azureAdRaw := azureADsRaw[0].(map[string]interface{}) - clientAppId := azureAdRaw["client_app_id"].(string) - serverAppId := azureAdRaw["server_app_id"].(string) - serverAppSecret := azureAdRaw["server_app_secret"].(string) - tenantId := azureAdRaw["tenant_id"].(string) + var aad *containerservice.ManagedClusterAADProfile + if len(azureADsRaw) > 0 { + azureAdRaw := azureADsRaw[0].(map[string]interface{}) - if tenantId == "" { - tenantId = providerTenantId - } + clientAppId := azureAdRaw["client_app_id"].(string) + 
serverAppId := azureAdRaw["server_app_id"].(string) + serverAppSecret := azureAdRaw["server_app_secret"].(string) + tenantId := azureAdRaw["tenant_id"].(string) + + if tenantId == "" { + tenantId = providerTenantId + } - return rbacEnabled, &containerservice.ManagedClusterAADProfile{ - ClientAppID: utils.String(clientAppId), - ServerAppID: utils.String(serverAppId), - ServerAppSecret: utils.String(serverAppSecret), - TenantID: utils.String(tenantId), + aad = &containerservice.ManagedClusterAADProfile{ + ClientAppID: utils.String(clientAppId), + ServerAppID: utils.String(serverAppId), + ServerAppSecret: utils.String(serverAppSecret), + TenantID: utils.String(tenantId), + } } + + return rbacEnabled, aad } func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.ManagedClusterProperties, d *schema.ResourceData) []interface{} { From 40eaeb7877aa1272daea2ab713fb0f31464821e4 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 12 Dec 2018 16:05:54 +0000 Subject: [PATCH 4/4] Upgrading the version of K8s being used to fix `TestAccAzureRMKubernetesCluster_upgradeConfig` --- azurerm/resource_arm_kubernetes_cluster_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azurerm/resource_arm_kubernetes_cluster_test.go b/azurerm/resource_arm_kubernetes_cluster_test.go index 47d5517d0b67..b9c5001b8a52 100644 --- a/azurerm/resource_arm_kubernetes_cluster_test.go +++ b/azurerm/resource_arm_kubernetes_cluster_test.go @@ -276,10 +276,10 @@ func TestAccAzureRMKubernetesCluster_upgradeConfig(t *testing.T) { ), }, { - Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, "1.11.3"), + Config: testAccAzureRMKubernetesCluster_upgrade(ri, location, clientId, clientSecret, "1.11.5"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "kubernetes_version", "1.11.3"), + resource.TestCheckResourceAttr(resourceName, 
"kubernetes_version", "1.11.5"), ), }, },