From ebcf2e3faf5f80c7e8fb0bea42e41fd5ed48184c Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Thu, 26 Mar 2020 10:55:37 +0100 Subject: [PATCH] Add AKS platform support Refs #215 #216 Signed-off-by: Mateusz Gozdek --- cli/cmd/root.go | 1 + examples/aks-testing/cluster.lokocfg | 45 +++++++ pkg/platform/aks/aks.go | 188 +++++++++++++++++++++++++++ pkg/platform/aks/template.go | 148 +++++++++++++++++++++ 4 files changed, 382 insertions(+) create mode 100644 examples/aks-testing/cluster.lokocfg create mode 100644 pkg/platform/aks/aks.go create mode 100644 pkg/platform/aks/template.go diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 96e8ac5c6..98151b1f8 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/viper" // Register platforms by adding an anonymous import. + _ "github.com/kinvolk/lokomotive/pkg/platform/aks" _ "github.com/kinvolk/lokomotive/pkg/platform/aws" _ "github.com/kinvolk/lokomotive/pkg/platform/baremetal" _ "github.com/kinvolk/lokomotive/pkg/platform/packet" diff --git a/examples/aks-testing/cluster.lokocfg b/examples/aks-testing/cluster.lokocfg new file mode 100644 index 000000000..9df713acc --- /dev/null +++ b/examples/aks-testing/cluster.lokocfg @@ -0,0 +1,45 @@ +variable "subscription_id" {} +variable "tenant_id" {} +variable "grafana_admin_password" {} +variable "resource_group_name" {} +variable "application_name" {} + +variable "asset_dir" { + default = "./lokomotive-assets" +} + +variable "cluster_name" { + default = "lokomotive-cluster" +} + +variable "workers_count" { + default = 1 +} + +variable "workers_type" { + default = "Standard_D2_v2" +} + +variable "location" { + default = "West Europe" +} + +cluster "aks" { + asset_dir = pathexpand(var.asset_dir) + cluster_name = var.cluster_name + + subscription_id = var.subscription_id + tenant_id = var.tenant_id + location = var.location + resource_group_name = var.resource_group_name + application_name = var.application_name + + worker_pool 
"default" { + vm_size = var.workers_type + count = var.workers_count + } +} + +component "prometheus-operator" { + grafana_admin_password = var.grafana_admin_password +} diff --git a/pkg/platform/aks/aks.go b/pkg/platform/aks/aks.go new file mode 100644 index 000000000..9bfcbf076 --- /dev/null +++ b/pkg/platform/aks/aks.go @@ -0,0 +1,188 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package aks is a Platform implementation for creating Kubernetes cluster using +// Azure AKS. +package aks + +import ( + "fmt" + "os" + "path/filepath" + "text/template" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/mitchellh/go-homedir" + + "github.com/kinvolk/lokomotive/pkg/platform" + "github.com/kinvolk/lokomotive/pkg/platform/util" + "github.com/kinvolk/lokomotive/pkg/terraform" +) + +type workerPool struct { + Name string `hcl:"name,label"` + VMSize string `hcl:"vm_size"` + Count int `hcl:"count"` +} + +type config struct { + AssetDir string `hcl:"asset_dir"` + ClusterName string `hcl:"cluster_name"` + Tags map[string]string `hcl:"tags,optional"` + + // Azure specific. 
+ SubscriptionID string `hcl:"subscription_id"` + TenantID string `hcl:"tenant_id"` + Location string `hcl:"location,optional"` + + // ApplicationName for created service principal + ApplicationName string `hcl:"application_name"` + ResourceGroupName string `hcl:"resource_group_name"` + + WorkerPools []workerPool `hcl:"worker_pool,block"` +} + +// init registers aks as a platform. +func init() { //nolint:gochecknoinits + c := &config{ + Location: "West Europe", + } + + platform.Register("aks", c) +} + +func (c *config) LoadConfig(configBody *hcl.Body, evalContext *hcl.EvalContext) hcl.Diagnostics { + if configBody == nil { + return hcl.Diagnostics{} + } + + if diags := gohcl.DecodeBody(*configBody, evalContext, c); len(diags) != 0 { + return diags + } + + return c.checkValidConfig() +} + +// checkValidConfig validates cluster configuration. +func (c *config) checkValidConfig() hcl.Diagnostics { + var diagnostics hcl.Diagnostics + + diagnostics = append(diagnostics, c.checkNotEmptyWorkers()...) + diagnostics = append(diagnostics, c.checkWorkerPoolNamesUnique()...) + + return diagnostics +} + +// checkNotEmptyWorkers checks if the cluster has at least 1 node pool defined. +func (c *config) checkNotEmptyWorkers() hcl.Diagnostics { + var diagnostics hcl.Diagnostics + + if len(c.WorkerPools) == 0 { + diagnostics = append(diagnostics, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "At least one worker pool must be defined", + Detail: "Make sure to define at least one worker pool block in your cluster block", + }) + } + + return diagnostics +} + +// checkWorkerPoolNamesUnique verifies that all worker pool names are unique. +func (c *config) checkWorkerPoolNamesUnique() hcl.Diagnostics { + var diagnostics hcl.Diagnostics + + dup := make(map[string]bool) + + for _, w := range c.WorkerPools { + if !dup[w.Name] { + dup[w.Name] = true + continue + } + + // It is duplicated. 
+ diagnostics = append(diagnostics, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Worker pool names should be unique",
+ Detail: fmt.Sprintf("Worker pool '%v' is duplicated", w.Name),
+ })
+ }
+
+ return diagnostics
+}
+
+// Meta is part of Platform interface and returns common information about the platform configuration.
+func (c *config) Meta() platform.Meta {
+ nodes := 0
+ for _, workerpool := range c.WorkerPools {
+ nodes += workerpool.Count
+ }
+
+ return platform.Meta{
+ AssetDir: c.AssetDir,
+ ExpectedNodes: nodes,
+ Managed: true,
+ }
+}
+
+func (c *config) Apply(ex *terraform.Executor) error {
+ if err := c.Initialize(ex); err != nil {
+ return err
+ }
+
+ return ex.Apply()
+}
+
+func (c *config) Destroy(ex *terraform.Executor) error {
+ if err := c.Initialize(ex); err != nil {
+ return err
+ }
+
+ return ex.Destroy()
+}
+
+func (c *config) Initialize(ex *terraform.Executor) error {
+ assetDir, err := homedir.Expand(c.AssetDir)
+ if err != nil {
+ return err
+ }
+
+ terraformRootDir := terraform.GetTerraformRootDir(assetDir)
+
+ return createTerraformConfigFile(c, terraformRootDir)
+}
+
+func createTerraformConfigFile(cfg *config, terraformRootDir string) error {
+ t := template.Must(template.New("t").Parse(terraformConfigTmpl))
+
+ path := filepath.Join(terraformRootDir, "cluster.tf")
+ f, err := os.Create(path)
+ if err != nil {
+ return fmt.Errorf("failed to create file %q: %w", path, err)
+ }
+
+ util.AppendTags(&cfg.Tags)
+
+ if err := t.Execute(f, cfg); err != nil {
+ f.Close() // Best-effort close to avoid leaking the handle; the Execute error takes precedence.
+ return fmt.Errorf("failed to write template to file %q: %w", path, err)
+ }
+
+ if err := f.Close(); err != nil {
+ return fmt.Errorf("failed closing file %q: %w", path, err)
+ }
+
+ return nil
+}
diff --git a/pkg/platform/aks/template.go b/pkg/platform/aks/template.go
new file mode 100644
index 000000000..ca8cad20e
--- /dev/null
+++ b/pkg/platform/aks/template.go
@@ -0,0 +1,148 @@
+// Copyright 2020 The Lokomotive Authors
+//
+// Licensed under the Apache License, Version
2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aks
+
+var terraformConfigTmpl = `
+locals {
+ subscription_id = "{{ .SubscriptionID }}"
+ tenant_id = "{{ .TenantID }}"
+ application_name = "{{ .ApplicationName }}"
+ location = "{{ .Location }}"
+ resource_group_name = "{{ .ResourceGroupName }}"
+ kubernetes_version = "1.16.7"
+ cluster_name = "{{ .ClusterName }}"
+ default_node_pool_name = "{{ (index .WorkerPools 0).Name }}"
+ default_node_pool_vm_size = "{{ (index .WorkerPools 0).VMSize }}"
+ default_node_pool_count = {{ (index .WorkerPools 0).Count }}
+}
+
+provider "azurerm" {
+ version = "2.2.0"
+
+ # https://github.com/terraform-providers/terraform-provider-azurerm/issues/5893
+ features {}
+}
+
+provider "azuread" {
+ version = "0.8.0"
+}
+
+provider "random" {
+ version = "2.2.1"
+}
+
+provider "local" {
+ version = "1.4.0"
+}
+
+# TODO: user may want to provide their own resource group.
+resource "azurerm_resource_group" "aks" {
+ name = local.resource_group_name
+ location = local.location
+}
+
+# TODO: user may want to provide their own service principal details.
+resource "azuread_application" "aks" { + name = local.application_name +} + +resource "azuread_service_principal" "aks" { + application_id = azuread_application.aks.application_id +} + +resource "random_string" "password" { + length = 16 + special = true + + override_special = "/@\" " +} + +resource "azuread_application_password" "aks" { + application_object_id = azuread_application.aks.object_id + value = random_string.password.result + end_date_relative = "86000h" +} + +resource "azurerm_role_assignment" "aks" { + scope = "/subscriptions/${local.subscription_id}" + role_definition_name = "Contributor" + principal_id = azuread_service_principal.aks.id +} + +resource "azurerm_kubernetes_cluster" "aks" { + name = local.cluster_name + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + kubernetes_version = local.kubernetes_version + dns_prefix = local.cluster_name + + default_node_pool { + name = local.default_node_pool_name + vm_size = local.default_node_pool_vm_size + node_count = local.default_node_pool_count + } + + role_based_access_control { + enabled = true + } + + service_principal { + client_id = azuread_application.aks.application_id + client_secret = azuread_application_password.aks.value + } + + network_profile { + network_plugin = "kubenet" + network_policy = "calico" + } + + {{- if .Tags }} + tags = { + {{- range $k, $v := .Tags }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} +} + +{{ range $index, $pool := (slice .WorkerPools 1) }} +resource "azurerm_kubernetes_cluster_node_pool" "worker-{{ $pool.Name }}" { + name = "{{ $pool.Name }}" + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id + vm_size = "{{ $pool.VMSize }}" + node_count = "{{ $pool.Count }}" + + {{- if $.Tags }} + tags = { + {{- range $k, $v := $.Tags }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} +} +{{- end }} + +resource "local_file" "kubeconfig" { + sensitive_content = 
azurerm_kubernetes_cluster.aks.kube_config_raw + filename = "../cluster-assets/auth/kubeconfig" +} + +# Stub output, which indicates, that Terraform run at least once. +# Used when checking, if we should ask user for confirmation, when +# applying changes to the cluster. +output "initialized" { + value = true +} +`