diff --git a/Makefile b/Makefile index 1483e40dd..bba3c358b 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ VERSION := MOD ?= vendor DOCS_DIR ?= docs/cli -ALL_BUILD_TAGS := "aws,packet,e2e,disruptivee2e,poste2e" +ALL_BUILD_TAGS := "aws,packet,aks,e2e,disruptivee2e,poste2e" ## Adds a '-dirty' suffix to version string if there are uncommitted changes changes := $(shell git status --porcelain) diff --git a/ci/aks/aks-cluster.lokocfg.envsubst b/ci/aks/aks-cluster.lokocfg.envsubst new file mode 100644 index 000000000..47da515fa --- /dev/null +++ b/ci/aks/aks-cluster.lokocfg.envsubst @@ -0,0 +1,118 @@ +variable "cert_manager_email" { + default = "$EMAIL" +} +variable "cluster_name" { + default = "$CLUSTER_ID" +} + +variable "aws_zone_id" { + default = "$AWS_DNS_ZONE_ID" +} + +variable "aws_access_key_id" { + default = "$AWS_ACCESS_KEY_ID" +} + +variable "aws_secret_access_key" { + default = "$AWS_SECRET_ACCESS_KEY" +} + +variable "aws_dns_zone" { + default = "$AWS_DNS_ZONE" +} + +variable "resource_group_name" { + default = "$CLUSTER_ID" +} + +variable "grafana_admin_password" { + default = "admin" +} + +variable "asset_dir" { + default = "~/lokoctl-assets" +} + +variable "workers_count" { + default = 2 +} + +variable "workers_type" { + default = "Standard_D2_v2" +} + +variable "location" { + default = "Germany West Central" +} + +variable "worker_labels" { + default = { + "testing.io" = "yes", + "roleofnode" = "testing", + } +} + +cluster "aks" { + asset_dir = pathexpand(var.asset_dir) + cluster_name = var.cluster_name + + location = var.location + resource_group_name = var.resource_group_name + + worker_pool "default" { + vm_size = var.workers_type + count = var.workers_count + labels = var.worker_labels + } + + tags = { + "owner" = "LokomotiveCI" + } +} + +component "prometheus-operator" { + grafana_admin_password = var.grafana_admin_password + disable_webhooks = true + + monitor { + etcd = false + kube_controller_manager = false + kube_scheduler = false + kube_proxy = false + kubelet = false + } + + coredns { + selector = { + "k8s-app" = "kube-dns", + } + } +} + +component "contour" { + ingress_hosts = [ + "httpbin.${var.cluster_name}.${var.aws_dns_zone}", + ] + service_monitor = true +} + +component "cert-manager" { + email = var.cert_manager_email + service_monitor = true +} + +component "external-dns" { + policy = "sync" + owner_id = var.cluster_name + aws { + zone_id = var.aws_zone_id + aws_access_key_id = var.aws_access_key_id + aws_secret_access_key = var.aws_secret_access_key + } + + service_monitor = true +} + +component "httpbin" { + ingress_host = "httpbin.${var.cluster_name}.${var.aws_dns_zone}" +} diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 96e8ac5c6..98151b1f8 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/viper" // Register platforms by adding an anonymous import. 
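+	// Importing a platform package runs its init function, which calls
+	// platform.Register to make the platform available to the CLI.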
+	_ "github.com/kinvolk/lokomotive/pkg/platform/aks"
 	_ "github.com/kinvolk/lokomotive/pkg/platform/aws"
 	_ "github.com/kinvolk/lokomotive/pkg/platform/baremetal"
 	_ "github.com/kinvolk/lokomotive/pkg/platform/packet"
diff --git a/examples/aks-testing/cluster.lokocfg b/examples/aks-testing/cluster.lokocfg
new file mode 100644
index 000000000..bb56a2845
--- /dev/null
+++ b/examples/aks-testing/cluster.lokocfg
@@ -0,0 +1,78 @@
+variable "subscription_id" {}
+variable "tenant_id" {}
+variable "grafana_admin_password" {}
+variable "resource_group_name" {}
+variable "cert_manager_email" {}
+
+variable "manage_resource_group" {
+  default = true
+}
+
+variable "application_name" {
+  default = ""
+}
+
+variable "asset_dir" {
+  default = "./lokomotive-assets"
+}
+
+variable "cluster_name" {
+  default = "lokomotive-cluster"
+}
+
+variable "workers_count" {
+  default = 1
+}
+
+variable "workers_type" {
+  default = "Standard_D2_v2"
+}
+
+variable "location" {
+  default = "West Europe"
+}
+
+cluster "aks" {
+  asset_dir    = pathexpand(var.asset_dir)
+  cluster_name = var.cluster_name
+
+  subscription_id       = var.subscription_id
+  tenant_id             = var.tenant_id
+  location              = var.location
+  resource_group_name   = var.resource_group_name
+  application_name      = var.application_name
+  manage_resource_group = var.manage_resource_group
+
+  worker_pool "default" {
+    vm_size = var.workers_type
+    count   = var.workers_count
+  }
+}
+
+component "prometheus-operator" {
+  grafana_admin_password = var.grafana_admin_password
+  disable_webhooks       = true
+
+  monitor {
+    etcd                    = false
+    kube_controller_manager = false
+    kube_scheduler          = false
+    kube_proxy              = false
+    kubelet                 = false
+  }
+
+  coredns {
+    selector = {
+      "k8s-app" = "kube-dns",
+    }
+  }
+}
+
+component "cert-manager" {
+  email           = var.cert_manager_email
+  service_monitor = true
+}
+
+component "contour" {
+  service_monitor = true
+}
diff --git a/pkg/platform/aks/aks.go b/pkg/platform/aks/aks.go
new file mode 100644
index 000000000..6952e7ac0
--- /dev/null
+++ b/pkg/platform/aks/aks.go
@@ -0,0 +1,344 @@
+// Copyright 2020 The Lokomotive Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package aks is a Platform implementation for creating a Kubernetes cluster using
+// Azure AKS.
+package aks
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"text/template"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/gohcl"
+	"github.com/mitchellh/go-homedir"
+
+	"github.com/kinvolk/lokomotive/pkg/platform"
+	"github.com/kinvolk/lokomotive/pkg/platform/util"
+	"github.com/kinvolk/lokomotive/pkg/terraform"
+)
+
+type workerPool struct {
+	Name   string            `hcl:"name,label"`
+	VMSize string            `hcl:"vm_size,optional"`
+	Count  int               `hcl:"count,optional"`
+	Labels map[string]string `hcl:"labels,optional"`
+	Taints []string          `hcl:"taints,optional"`
+}
+
+type config struct {
+	AssetDir    string            `hcl:"asset_dir,optional"`
+	ClusterName string            `hcl:"cluster_name,optional"`
+	Tags        map[string]string `hcl:"tags,optional"`
+
+	// Azure specific.
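+	// Empty credential fields fall back to the ARM_CLIENT_ID, ARM_CLIENT_SECRET,
+	// ARM_SUBSCRIPTION_ID and ARM_TENANT_ID environment variables during
+	// validation and when rendering the Terraform configuration.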
+	SubscriptionID string `hcl:"subscription_id,optional"`
+	TenantID       string `hcl:"tenant_id,optional"`
+	ClientID       string `hcl:"client_id,optional"`
+	ClientSecret   string `hcl:"client_secret,optional"`
+
+	Location string `hcl:"location,optional"`
+
+	// ApplicationName is the name for the Azure AD application created for the service principal.
+	ApplicationName string `hcl:"application_name,optional"`
+
+	ResourceGroupName   string `hcl:"resource_group_name,optional"`
+	ManageResourceGroup bool   `hcl:"manage_resource_group,optional"`
+
+	WorkerPools []workerPool `hcl:"worker_pool,block"`
+}
+
+const (
+	name              = "aks"
+	clientIDEnv       = "ARM_CLIENT_ID"
+	clientSecretEnv   = "ARM_CLIENT_SECRET" // #nosec G101
+	subscriptionIDEnv = "ARM_SUBSCRIPTION_ID"
+	tenantIDEnv       = "ARM_TENANT_ID"
+)
+
+// init registers aks as a platform.
+func init() { //nolint:gochecknoinits
+	c := &config{
+		Location:            "West Europe",
+		ManageResourceGroup: true,
+	}
+
+	platform.Register(name, c)
+}
+
+func (c *config) LoadConfig(configBody *hcl.Body, evalContext *hcl.EvalContext) hcl.Diagnostics {
+	if configBody == nil {
+		emptyConfig := hcl.EmptyBody()
+		configBody = &emptyConfig
+	}
+
+	if d := gohcl.DecodeBody(*configBody, evalContext, c); d.HasErrors() {
+		return d
+	}
+
+	return c.checkValidConfig()
+}
+
+// checkValidConfig validates cluster configuration.
+func (c *config) checkValidConfig() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	// Collect all validation functions here.
+	f := []func() hcl.Diagnostics{
+		c.checkNotEmptyWorkers,
+		c.checkWorkerPoolNamesUnique,
+		c.checkWorkerPools,
+		c.checkCredentials,
+		c.checkRequiredFields,
+	}
+
+	// Then call all of them and collect the diagnostics.
+	for _, vf := range f {
+		d = append(d, vf()...)
+	}
+
+	return d
+}
+
+func (c *config) checkWorkerPools() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	for _, w := range c.WorkerPools {
+		if w.VMSize == "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("pool %q: VMSize field can't be empty", w.Name),
+			})
+		}
+
+		if w.Count <= 0 {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("pool %q: count must be greater than 0", w.Name),
+			})
+		}
+	}
+
+	return d
+}
+
+func (c *config) checkRequiredFields() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	if c.SubscriptionID == "" && os.Getenv(subscriptionIDEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure subscription ID",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable "+
+				"is not defined. At least one of these should be defined",
+				"SubscriptionID", subscriptionIDEnv),
+		})
+	}
+
+	if c.TenantID == "" && os.Getenv(tenantIDEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure tenant ID",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable "+
+				"is not defined. At least one of these should be defined", "TenantID", tenantIDEnv),
+		})
+	}
+
+	f := map[string]string{
+		"AssetDir":          c.AssetDir,
+		"ClusterName":       c.ClusterName,
+		"ResourceGroupName": c.ResourceGroupName,
+	}
+
+	for k, v := range f {
+		if v == "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("field %q can't be empty", k),
+			})
+		}
+	}
+
+	return d
+}
+
+// checkCredentials checks if credentials are correctly defined.
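+// Credentials can be provided in two mutually exclusive ways: either set
+// ApplicationName, in which case Terraform creates a dedicated Azure AD
+// application and service principal, or set ClientID and ClientSecret (or
+// their ARM_* environment variable equivalents) to use an existing service principal.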
+func (c *config) checkCredentials() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	// If the application name is defined, we assume that we operate as a highly
+	// privileged account which has permission to create new Azure AD applications,
+	// so the ClientID and ClientSecret fields are not needed.
+	if c.ApplicationName != "" {
+		if c.ClientID != "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "ClientID and ApplicationName are mutually exclusive",
+			})
+		}
+
+		if c.ClientSecret != "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "ClientSecret and ApplicationName are mutually exclusive",
+			})
+		}
+
+		return d
+	}
+
+	if c.ClientSecret == "" && os.Getenv(clientSecretEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure client secret",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable "+
+				"is not defined. At least one of these should be defined", "ClientSecret", clientSecretEnv),
+		})
+	}
+
+	if c.ClientID == "" && os.Getenv(clientIDEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure client ID",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable is "+
+				"not defined. At least one of these should be defined", "ClientID", clientIDEnv),
+		})
+	}
+
+	return d
+}
+
+// checkNotEmptyWorkers checks if the cluster has at least 1 node pool defined.
+func (c *config) checkNotEmptyWorkers() hcl.Diagnostics {
+	var diagnostics hcl.Diagnostics
+
+	if len(c.WorkerPools) == 0 {
+		diagnostics = append(diagnostics, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "At least one worker pool must be defined",
+			Detail:   "Make sure to define at least one worker pool block in your cluster block",
+		})
+	}
+
+	return diagnostics
+}
+
+// checkWorkerPoolNamesUnique verifies that all worker pool names are unique.
+func (c *config) checkWorkerPoolNamesUnique() hcl.Diagnostics {
+	var diagnostics hcl.Diagnostics
+
+	dup := make(map[string]bool)
+
+	for _, w := range c.WorkerPools {
+		if !dup[w.Name] {
+			dup[w.Name] = true
+			continue
+		}
+
+		// It is duplicated.
+		diagnostics = append(diagnostics, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Worker pool names must be unique",
+			Detail:   fmt.Sprintf("Worker pool '%v' is duplicated", w.Name),
+		})
+	}
+
+	return diagnostics
+}
+
+// Meta is part of the Platform interface and returns common information about the platform configuration.
+func (c *config) Meta() platform.Meta {
+	nodes := 0
+	for _, workerpool := range c.WorkerPools {
+		nodes += workerpool.Count
+	}
+
+	return platform.Meta{
+		AssetDir:      c.AssetDir,
+		ExpectedNodes: nodes,
+		Managed:       true,
+	}
+}
+
+// Apply creates AKS infrastructure via Terraform.
+func (c *config) Apply(ex *terraform.Executor) error {
+	if err := c.Initialize(ex); err != nil {
+		return err
+	}
+
+	return ex.Apply()
+}
+
+// Destroy destroys AKS infrastructure via Terraform.
+func (c *config) Destroy(ex *terraform.Executor) error {
+	if err := c.Initialize(ex); err != nil {
+		return err
+	}
+
+	return ex.Destroy()
+}
+
+// Initialize creates Terraform files required for AKS.
+func (c *config) Initialize(ex *terraform.Executor) error {
+	assetDir, err := homedir.Expand(c.AssetDir)
+	if err != nil {
+		return err
+	}
+
+	terraformRootDir := terraform.GetTerraformRootDir(assetDir)
+
+	return createTerraformConfigFile(c, terraformRootDir)
+}
+
+// createTerraformConfigFile creates a Terraform config file in the given directory.
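+// Credentials left empty in the configuration are populated from the ARM_*
+// environment variables before the template is rendered, so the generated
+// cluster.tf contains the effective credentials.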
+func createTerraformConfigFile(cfg *config, terraformRootDir string) error {
+	t := template.Must(template.New("t").Parse(terraformConfigTmpl))
+
+	path := filepath.Join(terraformRootDir, "cluster.tf")
+
+	f, err := os.Create(path)
+	if err != nil {
+		return fmt.Errorf("failed to create file %q: %w", path, err)
+	}
+
+	util.AppendTags(&cfg.Tags)
+
+	if cfg.ClientSecret == "" {
+		cfg.ClientSecret = os.Getenv(clientSecretEnv)
+	}
+
+	if cfg.SubscriptionID == "" {
+		cfg.SubscriptionID = os.Getenv(subscriptionIDEnv)
+	}
+
+	if cfg.ClientID == "" {
+		cfg.ClientID = os.Getenv(clientIDEnv)
+	}
+
+	if cfg.TenantID == "" {
+		cfg.TenantID = os.Getenv(tenantIDEnv)
+	}
+
+	if err := t.Execute(f, cfg); err != nil {
+		// Close the file but return the more informative template error.
+		_ = f.Close()
+
+		return fmt.Errorf("failed to write template to file %q: %w", path, err)
+	}
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("failed closing file %q: %w", path, err)
+	}
+
+	return nil
+}
diff --git a/pkg/platform/aks/aks_test.go b/pkg/platform/aks/aks_test.go
new file mode 100644
index 000000000..f79c00414
--- /dev/null
+++ b/pkg/platform/aks/aks_test.go
@@ -0,0 +1,307 @@
+package aks
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/gohcl"
+	"github.com/hashicorp/hcl/v2/hclparse"
+
+	lokoconfig "github.com/kinvolk/lokomotive/pkg/config"
+)
+
+// createTerraformConfigFile()
+func TestCreateTerraformConfigFile(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "lokoctl-tests-")
+	if err != nil {
+		t.Fatalf("creating tmp dir should succeed, got: %v", err)
+	}
+
+	defer func() {
+		if err := os.RemoveAll(tmpDir); err != nil {
+			t.Logf("failed to remove temp dir %q: %v", tmpDir, err)
+		}
+	}()
+
+	c := &config{
+		WorkerPools: []workerPool{
+			{
+				Name:   "foo",
+				VMSize: "bar",
+				Count:  1,
+			},
+		},
+	}
+
+	if err := createTerraformConfigFile(c, tmpDir); err != nil {
+		t.Fatalf("creating Terraform config files should succeed, got: %v", err)
+	}
+}
+
+func TestCreateTerraformConfigFileNoWorkerPools(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "lokoctl-tests-")
+	if err != nil {
+		t.Fatalf("creating tmp dir should succeed, got: %v", err)
+	}
+
+	defer func() {
+		if err := os.RemoveAll(tmpDir); err != nil {
+			t.Logf("failed to remove temp dir %q: %v", tmpDir, err)
+		}
+	}()
+
+	c := &config{}
+
+	if err := createTerraformConfigFile(c, tmpDir); err == nil {
+		t.Fatalf("creating Terraform config files should fail if there are no worker pools defined")
+	}
+}
+
+func TestCreateTerraformConfigFileNonExistingPath(t *testing.T) {
+	c := &config{}
+
+	if err := createTerraformConfigFile(c, "/nonexisting"); err == nil {
+		t.Fatalf("creating Terraform config files in non-existing path should fail")
+	}
+}
+
+// Meta()
+func TestMeta(t *testing.T) {
+	assetDir := "foo"
+
+	c := &config{
+		AssetDir: assetDir,
+		WorkerPools: []workerPool{
+			{
+				Count: 1,
+			},
+			{
+				Count: 3,
+			},
+		},
+	}
+
+	expectedNodes := 4
+	if e := c.Meta().ExpectedNodes; e != expectedNodes {
+		t.Errorf("Meta should count workers from all pools. Expected %d, got %d", expectedNodes, e)
+	}
+
+	if a := c.Meta().AssetDir; a != assetDir {
+		t.Errorf("Meta should return configured asset dir. Expected %q, got %q", assetDir, a)
+	}
+}
+
+// checkWorkerPoolNamesUnique()
+func TestCheckWorkerPoolNamesUniqueDuplicated(t *testing.T) {
+	c := &config{
+		WorkerPools: []workerPool{
+			{
+				Name: "foo",
+			},
+			{
+				Name: "foo",
+			},
+		},
+	}
+
+	if d := c.checkWorkerPoolNamesUnique(); !d.HasErrors() {
+		t.Fatalf("should return error when worker pools are duplicated")
+	}
+}
+
+func TestCheckWorkerPoolNamesUnique(t *testing.T) {
+	c := &config{
+		WorkerPools: []workerPool{
+			{
+				Name: "foo",
+			},
+			{
+				Name: "bar",
+			},
+		},
+	}
+
+	if d := c.checkWorkerPoolNamesUnique(); d.HasErrors() {
+		t.Fatalf("should not return errors when pool names are unique, got: %v", d)
+	}
+}
+
+// checkNotEmptyWorkers()
+func TestNotEmptyWorkersEmpty(t *testing.T) {
+	c := &config{}
+
+	if d := c.checkNotEmptyWorkers(); !d.HasErrors() {
+		t.Fatalf("should return error when there is no worker pool defined")
+	}
+}
+
+func TestNotEmptyWorkers(t *testing.T) {
+	c := &config{
+		WorkerPools: []workerPool{
+			{
+				Name: "foo",
+			},
+		},
+	}
+
+	if d := c.checkNotEmptyWorkers(); d.HasErrors() {
+		t.Fatalf("should not return errors when worker pool is not empty, got: %v", d)
+	}
+}
+
+// checkValidConfig()
+func TestCheckWorkerPoolNamesUniqueTest(t *testing.T) {
+	c := &config{
+		WorkerPools: []workerPool{
+			{
+				Name: "foo",
+			},
+			{
+				Name: "bar",
+			},
+		},
+	}
+
+	if d := c.checkWorkerPoolNamesUnique(); d.HasErrors() {
+		t.Fatalf("should not return errors when pool names are unique, got: %v", d)
+	}
+}
+
+// checkCredentials()
+func TestCheckCredentialsAppNameAndClientID(t *testing.T) {
+	c := &config{
+		ApplicationName: "foo",
+		ClientID:        "foo",
+	}
+
+	if d := c.checkCredentials(); !d.HasErrors() {
+		t.Fatalf("should give error if both ApplicationName and ClientID fields are defined")
+	}
+}
+
+func TestCheckCredentialsAppNameAndClientSecret(t *testing.T) {
+	c := &config{
+		ApplicationName: "foo",
+		ClientSecret:    "foo",
+	}
+
+	if d := c.checkCredentials(); !d.HasErrors() {
+		t.Fatalf("should give error if both ApplicationName and ClientSecret fields are defined")
+	}
+}
+
+func TestCheckCredentialsAppNameClientIDAndClientSecret(t *testing.T) {
+	c := &config{
+		ApplicationName: "foo",
+		ClientID:        "foo",
+		ClientSecret:    "foo",
+	}
+
+	if d := c.checkCredentials(); len(d) != 2 {
+		t.Fatalf("should give errors for both conflicting ClientID and ClientSecret, got %v", d)
+	}
+}
+
+func TestCheckCredentialsRequireSome(t *testing.T) {
+	c := &config{}
+
+	if d := c.checkCredentials(); !d.HasErrors() {
+		t.Fatalf("should give error if no credentials are defined")
+	}
+}
+
+func TestCheckCredentialsRequireClientIDWithClientSecret(t *testing.T) {
+	c := &config{
+		ClientSecret: "foo",
+	}
+
+	if d := c.checkCredentials(); !d.HasErrors() {
+		t.Fatalf("should give error if ClientSecret is defined and ClientID is empty")
+	}
+}
+
+func TestCheckCredentialsReadClientSecretFromEnvironment(t *testing.T) {
+	if err := os.Setenv(clientSecretEnv, "1"); err != nil {
+		t.Fatalf("failed to set environment variable %q: %v", clientSecretEnv, err)
+	}
+
+	defer func() {
+		if err := os.Unsetenv(clientSecretEnv); err != nil {
+			t.Logf("failed unsetting environment variable %q: %v", clientSecretEnv, err)
+		}
+	}()
+
+	c := &config{
+		ClientID: "foo",
+	}
+
+	if d := c.checkCredentials(); d.HasErrors() {
+		t.Fatalf("should read client secret from environment")
+	}
+}
+
+// LoadConfig()
+func loadConfigFromString(t *testing.T, c string) hcl.Diagnostics {
+	p := hclparse.NewParser()
+
+	f, d := p.ParseHCL([]byte(c),
"x.lokocfg") + if d.HasErrors() { + t.Fatalf("parsing HCL should succeed, got: %v", d) + } + + configBody := hcl.MergeFiles([]*hcl.File{f}) + + var rootConfig lokoconfig.RootConfig + + if d := gohcl.DecodeBody(configBody, nil, &rootConfig); d.HasErrors() { + t.Fatalf("decoding root config should succeed, got: %v", d) + } + + cc := &config{} + + return cc.LoadConfig(&rootConfig.Cluster.Config, &hcl.EvalContext{}) +} + +func TestLoadConfig(t *testing.T) { + c := ` +cluster "aks" { + asset_dir = "/fooo" + client_id = "bar" + client_secret = "foo" + cluster_name = "mycluster" + resource_group_name = "test" + subscription_id = "foo" + tenant_id = "bar" + + worker_pool "foo" { + count = 1 + vm_size = "foo" + } +} +` + if d := loadConfigFromString(t, c); d.HasErrors() { + t.Fatalf("valid config should not return error, got: %v", d) + } +} + +func TestLoadConfigEmpty(t *testing.T) { + c := &config{} + + if d := c.LoadConfig(nil, &hcl.EvalContext{}); !d.HasErrors() { + t.Fatalf("empty config should return error, got: %v", d) + } +} + +func TestLoadConfigBadHCL(t *testing.T) { + c := ` +cluster "aks" { + not_defined_field = "doh" +} +` + + if d := loadConfigFromString(t, c); !d.HasErrors() { + t.Fatalf("invalid HCL should return errors") + } +} diff --git a/pkg/platform/aks/template.go b/pkg/platform/aks/template.go new file mode 100644 index 000000000..749a20be8 --- /dev/null +++ b/pkg/platform/aks/template.go @@ -0,0 +1,225 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aks + +var terraformConfigTmpl = `{{- define "resource_group_name" -}} +{{- if .ManageResourceGroup -}} +azurerm_resource_group.aks.name +{{- else -}} +"{{ .ResourceGroupName }}" +{{- end -}} +{{- end -}} + +{{- define "client_id" -}} +{{- if .ApplicationName -}} +azuread_application.aks.application_id +{{- else -}} +"{{ .ClientID }}" +{{- end -}} +{{- end -}} + +{{- define "client_secret" -}} +{{- if .ApplicationName -}} +azuread_application_password.aks.value +{{- else -}} +"{{ .ClientSecret }}" +{{- end -}} +{{- end -}} + +locals { + subscription_id = "{{ .SubscriptionID }}" + tenant_id = "{{ .TenantID }}" + application_name = "{{ .ApplicationName }}" + location = "{{ .Location }}" + resource_group_name = {{ template "resource_group_name" . }} + kubernetes_version = "1.16.7" + cluster_name = "{{ .ClusterName }}" + default_node_pool_name = "{{ (index .WorkerPools 0).Name }}" + default_node_pool_vm_size = "{{ (index .WorkerPools 0).VMSize }}" + default_node_pool_count = {{ (index .WorkerPools 0).Count }} + client_id = {{ template "client_id" . }} + client_secret = {{ template "client_secret" . 
}}
+}
+
+provider "azurerm" {
+  version = "2.2.0"
+
+  # https://github.com/terraform-providers/terraform-provider-azurerm/issues/5893
+  features {}
+}
+
+provider "local" {
+  version = "1.4.0"
+}
+
+{{- if .ApplicationName }}
+provider "azuread" {
+  version = "0.8.0"
+}
+
+provider "random" {
+  version = "2.2.1"
+}
+
+resource "azuread_application" "aks" {
+  name = local.application_name
+}
+
+resource "azuread_service_principal" "aks" {
+  application_id = azuread_application.aks.application_id
+
+  {{- if .Tags }}
+  tags = {
+    {{- range $k, $v := .Tags }}
+    "{{ $k }}" = "{{ $v }}"
+    {{- end }}
+  }
+  {{- end }}
+}
+
+resource "random_string" "password" {
+  length  = 16
+  special = true
+
+  override_special = "/@\" "
+}
+
+resource "azuread_application_password" "aks" {
+  application_object_id = azuread_application.aks.object_id
+  value                 = random_string.password.result
+  end_date_relative     = "86000h"
+}
+
+resource "azurerm_role_assignment" "aks" {
+  scope                = "/subscriptions/${local.subscription_id}"
+  role_definition_name = "Contributor"
+  principal_id         = azuread_service_principal.aks.id
+}
+{{- end }}
+
+{{- if .ManageResourceGroup }}
+resource "azurerm_resource_group" "aks" {
+  name     = "{{ .ResourceGroupName }}"
+  location = local.location
+
+  {{- if .Tags }}
+  tags = {
+    {{- range $k, $v := .Tags }}
+    "{{ $k }}" = "{{ $v }}"
+    {{- end }}
+  }
+  {{- end }}
+}
+{{- end }}
+
+resource "azurerm_kubernetes_cluster" "aks" {
+  name                = local.cluster_name
+  location            = local.location
+  resource_group_name = local.resource_group_name
+  kubernetes_version  = local.kubernetes_version
+  dns_prefix          = local.cluster_name
+
+  default_node_pool {
+    name       = local.default_node_pool_name
+    vm_size    = local.default_node_pool_vm_size
+    node_count = local.default_node_pool_count
+
+    {{- if (index .WorkerPools 0).Labels }}
+    node_labels = {
+      {{- range $k, $v := (index .WorkerPools 0).Labels }}
+      "{{ $k }}" = "{{ $v }}"
+      {{- end }}
+    }
+    {{- end }}
+
+    {{- if (index .WorkerPools 0).Taints }}
+    node_taints = [
+      {{- range (index .WorkerPools 0).Taints }}
+      "{{ . }}",
+      {{- end }}
+    ]
+    {{- end }}
+  }
+
+  role_based_access_control {
+    enabled = true
+  }
+
+  service_principal {
+    client_id     = local.client_id
+    client_secret = local.client_secret
+  }
+
+  network_profile {
+    network_plugin = "kubenet"
+    network_policy = "calico"
+  }
+
+  {{- if .Tags }}
+  tags = {
+    {{- range $k, $v := .Tags }}
+    "{{ $k }}" = "{{ $v }}"
+    {{- end }}
+  }
+  {{- end }}
+}
+
+{{ range $index, $pool := (slice .WorkerPools 1) }}
+resource "azurerm_kubernetes_cluster_node_pool" "worker-{{ $pool.Name }}" {
+  name                  = "{{ $pool.Name }}"
+  kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
+  vm_size               = "{{ $pool.VMSize }}"
+  node_count            = "{{ $pool.Count }}"
+
+  {{- if $pool.Labels }}
+  node_labels = {
+    {{- range $k, $v := $pool.Labels }}
+    "{{ $k }}" = "{{ $v }}"
+    {{- end }}
+  }
+  {{- end }}
+
+  {{- if $pool.Taints }}
+  node_taints = [
+    {{- range $pool.Taints }}
+    "{{ . }}",
+    {{- end }}
+  ]
+  {{- end }}
+
+  {{- if $.Tags }}
+  tags = {
+    {{- range $k, $v := $.Tags }}
+    "{{ $k }}" = "{{ $v }}"
+    {{- end }}
+  }
+  {{- end }}
+}
+{{- end }}
+
+resource "local_file" "kubeconfig" {
+  sensitive_content = azurerm_kubernetes_cluster.aks.kube_config_raw
+  filename          = "../cluster-assets/auth/kubeconfig"
+}
+
+# Stub output which indicates that Terraform has run at least once.
+# Used when checking whether we should ask the user for confirmation
+# before applying changes to the cluster.
+output "initialized" { + value = true +} +` diff --git a/test/components/cert-manager/cert-manager_test.go b/test/components/cert-manager/cert-manager_test.go index ff53179c6..3d5388179 100644 --- a/test/components/cert-manager/cert-manager_test.go +++ b/test/components/cert-manager/cert-manager_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package certmanager diff --git a/test/components/contour/contour_test.go b/test/components/contour/contour_test.go index c4546aeb2..7b2545ef6 100644 --- a/test/components/contour/contour_test.go +++ b/test/components/contour/contour_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package contour diff --git a/test/components/external-dns/external-dns_test.go b/test/components/external-dns/external-dns_test.go index 7c1b2b8d7..b57d3da9f 100644 --- a/test/components/external-dns/external-dns_test.go +++ b/test/components/external-dns/external-dns_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package externaldns diff --git a/test/components/httpbin/httpbin_test.go b/test/components/httpbin/httpbin_test.go index 985eb7f1c..2aa179bcb 100644 --- a/test/components/httpbin/httpbin_test.go +++ b/test/components/httpbin/httpbin_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package httpbin diff --git a/test/components/prometheus-operator/prometheus_operator_test.go b/test/components/prometheus-operator/prometheus_operator_test.go index 2317394a1..cdee0cff6 100644 --- a/test/components/prometheus-operator/prometheus_operator_test.go +++ b/test/components/prometheus-operator/prometheus_operator_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package prometheusoperator diff --git a/test/components/util/util.go b/test/components/util/util.go index 19633a0c0..073f2bee3 100644 --- a/test/components/util/util.go +++ b/test/components/util/util.go @@ -337,6 +337,9 @@ const ( // PlatformBaremetal is for Baremetal PlatformBaremetal = "baremetal" + + // PlatformAKS is for AKS + PlatformAKS = "aks" ) // IsPlatformSupported takes in the test object and the list of supported platforms. The function diff --git a/test/monitoring/components_alerts_test.go b/test/monitoring/components_alerts_test.go index 47351b8d6..35bfad2ef 100644 --- a/test/monitoring/components_alerts_test.go +++ b/test/monitoring/components_alerts_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build aws packet +// +build aws packet aks // +build poste2e package monitoring diff --git a/test/monitoring/components_metrics_test.go b/test/monitoring/components_metrics_test.go index ea2c8ecf7..e27cd721d 100644 --- a/test/monitoring/components_metrics_test.go +++ b/test/monitoring/components_metrics_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build poste2e package monitoring @@ -31,6 +31,11 @@ import ( //nolint:funlen func testComponentsPrometheusMetrics(t *testing.T, v1api v1.API) { + selfHostedPlatforms := []testutil.Platform{ + testutil.PlatformPacket, + testutil.PlatformAWS, + } + testCases := []struct { componentName string query string @@ -47,18 +52,22 @@ func testComponentsPrometheusMetrics(t *testing.T, v1api v1.API) { { componentName: "kube-scheduler", query: "scheduler_schedule_attempts_total", + platforms: selfHostedPlatforms, }, { componentName: "kube-controller-manager", query: "workqueue_work_duration_seconds_bucket", + platforms: selfHostedPlatforms, }, { componentName: "kube-proxy", query: "kubeproxy_sync_proxy_rules_duration_seconds_bucket", + platforms: selfHostedPlatforms, }, { componentName: "kubelet", query: "kubelet_running_pod_count", + platforms: selfHostedPlatforms, }, { componentName: "metallb", @@ -68,12 +77,12 @@ func testComponentsPrometheusMetrics(t *testing.T, v1api v1.API) { { componentName: "contour", query: "contour_dagrebuild_timestamp", - platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS}, + platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS, testutil.PlatformAKS}, }, { componentName: "cert-manager", query: "certmanager_controller_sync_call_count", - platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS}, + platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS, testutil.PlatformAKS}, }, } diff --git a/test/monitoring/monitoring_test.go b/test/monitoring/monitoring_test.go index 1f004bdb6..4ca5d61d8 100644 --- a/test/monitoring/monitoring_test.go +++ b/test/monitoring/monitoring_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build poste2e package monitoring diff --git a/test/monitoring/scrape_targets_test.go b/test/monitoring/scrape_targets_test.go index e5e17ce56..723490919 100644 --- a/test/monitoring/scrape_targets_test.go +++ b/test/monitoring/scrape_targets_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build poste2e package monitoring
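A minimal sketch of a companion test for the ARM_SUBSCRIPTION_ID fallback in checkRequiredFields, mirroring TestCheckCredentialsReadClientSecretFromEnvironment from aks_test.go above; the test name and placeholder field values are illustrative:

func TestCheckRequiredFieldsReadSubscriptionIDFromEnvironment(t *testing.T) {
	if err := os.Setenv(subscriptionIDEnv, "1"); err != nil {
		t.Fatalf("failed to set environment variable %q: %v", subscriptionIDEnv, err)
	}

	defer func() {
		if err := os.Unsetenv(subscriptionIDEnv); err != nil {
			t.Logf("failed unsetting environment variable %q: %v", subscriptionIDEnv, err)
		}
	}()

	// SubscriptionID is left empty on purpose, so only the environment
	// variable can satisfy the check.
	c := &config{
		TenantID:          "foo",
		AssetDir:          "foo",
		ClusterName:       "foo",
		ResourceGroupName: "foo",
	}

	if d := c.checkRequiredFields(); d.HasErrors() {
		t.Fatalf("should read subscription ID from environment, got: %v", d)
	}
}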