diff --git a/Makefile b/Makefile index 57c08a812..be8f24029 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ VERSION := MOD ?= vendor DOCS_DIR ?= docs/cli -ALL_BUILD_TAGS := "aws,packet,e2e,disruptivee2e,poste2e" +ALL_BUILD_TAGS := "aws,packet,aks,e2e,disruptivee2e,poste2e" ## Adds a '-dirty' suffix to version string if there are uncommitted changes changes := $(shell git status --porcelain) diff --git a/ci/aks/aks-cluster.lokocfg.envsubst b/ci/aks/aks-cluster.lokocfg.envsubst new file mode 100644 index 000000000..7f5191887 --- /dev/null +++ b/ci/aks/aks-cluster.lokocfg.envsubst @@ -0,0 +1,118 @@ +variable "cert_manager_email" { + default = "$EMAIL" +} +variable "cluster_name" { + default = "$CLUSTER_ID" +} + +variable "aws_zone_id" { + default = "$AWS_DNS_ZONE_ID" +} + +variable "aws_access_key_id" { + default = "$AWS_ACCESS_KEY_ID" +} + +variable "aws_secret_access_key" { + default = "$AWS_SECRET_ACCESS_KEY" +} + +variable "aws_dns_zone" { + default = "$AWS_DNS_ZONE" +} + +variable "resource_group_name" { + default = "$CLUSTER_ID" +} + +variable "grafana_admin_password" { + default = "admin" +} + +variable "asset_dir" { + default = "~/lokoctl-assets" +} + +variable "workers_count" { + default = 2 +} + +variable "workers_type" { + default = "Standard_D2_v2" +} + +variable "location" { + default = "Germany West Central" +} + +variable "worker_labels" { + default = { + "testing.io" = "yes", + "roleofnode" = "testing", + } +} + +cluster "aks" { + asset_dir = pathexpand(var.asset_dir) + cluster_name = var.cluster_name + + location = var.location + resource_group_name = var.resource_group_name + + worker_pool "default" { + vm_size = var.workers_type + count = var.workers_count + labels = var.worker_labels + } + + tags = { + "owner" = "LokomotiveCIAKS" + } +} + +component "prometheus-operator" { + grafana_admin_password = var.grafana_admin_password + disable_webhooks = true + + monitor { + etcd = false + kube_controller_manager = false + kube_scheduler = false + kube_proxy = false + kubelet = false + } + + coredns { + selector = { + "k8s-app" = "kube-dns", + } + } +} + +component "contour" { + ingress_hosts = [ + "httpbin.${var.cluster_name}.${var.aws_dns_zone}", + ] + service_monitor = true +} + +component "cert-manager" { + email = var.cert_manager_email + service_monitor = true +} + +component "external-dns" { + policy = "sync" + owner_id = var.cluster_name + aws { + zone_id = var.aws_zone_id + aws_access_key_id = var.aws_access_key_id + aws_secret_access_key = var.aws_secret_access_key + } + + service_monitor = true +} + +component "httpbin" { + ingress_host = "httpbin.${var.cluster_name}.${var.aws_dns_zone}" +} diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 4118aaa67..8000e3dbb 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/viper" // Register platforms by adding an anonymous import. 
+ _ "github.com/kinvolk/lokomotive/pkg/platform/aks" _ "github.com/kinvolk/lokomotive/pkg/platform/aws" _ "github.com/kinvolk/lokomotive/pkg/platform/baremetal" _ "github.com/kinvolk/lokomotive/pkg/platform/packet" diff --git a/docs/configuration-reference/platforms/aks.md b/docs/configuration-reference/platforms/aks.md new file mode 100644 index 000000000..4542105b5 --- /dev/null +++ b/docs/configuration-reference/platforms/aks.md @@ -0,0 +1,129 @@ +# Lokomotive AKS configuration reference + +## Contents + +* [Introduction](#introduction) +* [Prerequisites](#prerequisites) +* [Configuration](#configuration) +* [Attribute reference](#attribute-reference) +* [Applying](#applying) +* [Destroying](#destroying) + +## Introduction + +This configuration reference provides information on configuring a Lokomotive cluster on Azure AKS with all the configuration options available to the user. + +## Prerequisites + +* `lokoctl` [installed locally](../../installer/lokoctl.md). +* `kubectl` installed locally to access the Kubernetes cluster. + +## Configuration + +To create a Lokomotive cluster, we need to define a configuration. + +Example configuration file: + +```tf +#myakscluster.lokocfg +variable "state_s3_bucket" {} +variable "lock_dynamodb_table" {} +variable "asset_dir" {} +variable "cluster_name" {} +variable "workers_count" {} +variable "state_s3_key" {} +variable "state_s3_region" {} +variable "workers_vm_size" {} +variable "location" {} +variable "tenant_id" {} +variable "subscription_id" {} +variable "client_id" {} +variable "client_secret" {} +variable "resource_group_name" {} +variable "application_name" {} +variable "manage_resource_group" {} + +backend "s3" { + bucket = var.state_s3_bucket + key = var.state_s3_key + region = var.state_s3_region + dynamodb_table = var.lock_dynamodb_table +} + +# backend "local" { +# path = "path/to/local/file" +#} + + +cluster "aks" { + asset_dir = pathexpand(var.asset_dir) + cluster_name = var.cluster_name + + tenant_id = var.tenant_id + subscription_id = var.subscription_id + client_id = var.client_id + client_secret = var.client_secret + + location = var.location + resource_group_name = var.resource_group_name + application_name = var.application_name + manage_resource_group = var.manage_resource_group + + worker_pool "default" { + count = var.workers_count + vm_size = var.workers_vm_size + + labels = { + "key" = "value", + } + + taints = [ + "node-role.kubernetes.io/master=NoSchedule", + ] + } + + tags = { + "key" = "value", + } +} +``` + +**NOTE**: Should you feel differently about the default values, you can set default values using the `variable` +block in the cluster configuration. + +## Attribute reference + +| Argument | Description | Default | Required | +| ----------------------- | ------------------------------------------------------------ | :-----------: | :------: | +| `asset_dir` | Location where Lokomotive stores cluster assets. | - | true | +| `cluster_name` | Name of the cluster. **NOTE**: It must be unique per resource group. | - | true | +| `tenant_id` | Azure Tenant ID. Can also be provided using the `LOKOMOTIVE_AKS_TENANT_ID` environment variable. | - | true | +| `subscription_id` | Azure Subscription ID. Can also be provided using the `LOKOMOTIVE_AKS_SUBSCRIPTION_ID` environment variable. | - | true | +| `resource_group_name` | Name of the resource group, where AKS cluster object will be created. 
Please note that AKS will also create a separate resource group for workers and other required objects, such as load balancers and disks. If the `manage_resource_group` parameter is set to `false`, this resource group must be created manually before creating the cluster. | - | true |
+| `client_id` | Azure service principal ID used for running the AKS cluster. Can also be provided using the `LOKOMOTIVE_AKS_CLIENT_ID` environment variable. This parameter is mutually exclusive with the `application_name` parameter. | - | false |
+| `client_secret` | Azure service principal secret used for running the AKS cluster. Can also be provided using the `LOKOMOTIVE_AKS_CLIENT_SECRET` environment variable. This parameter is mutually exclusive with the `application_name` parameter. | - | false |
+| `tags` | Additional tags for Azure resources. | - | false |
+| `location` | Azure location where resources will be created. Valid values can be obtained using the following Azure CLI command: `az account list-locations -o table`. | "West Europe" | false |
+| `application_name` | Azure AD application name. If specified, a new application will be created in Azure AD together with a service principal, which will be used to run the AKS cluster on behalf of the user to provide full cluster creation automation. Please note that this requires [permissions to create applications in Azure AD](https://docs.microsoft.com/en-us/azure/active-directory/users-groups-roles/roles-delegate-app-roles). This parameter is mutually exclusive with `client_id` and `client_secret`. | - | false |
+| `manage_resource_group` | If `true`, a resource group for the AKS object will be created on behalf of the user. | true | false |
+| `worker_pool` | Configuration block for worker pools. At least one worker pool must be defined. | - | true |
+| `worker_pool.count` | Number of workers in the worker pool. Can be changed afterwards to add or delete workers. | - | true |
+| `worker_pool.vm_size` | Azure VM size for worker nodes. | - | true |
+| `worker_pool.labels` | Map of Kubernetes Node object labels. | - | false |
+| `worker_pool.taints` | List of Kubernetes Node taints. | - | false |
+
+## Applying
+
+To create the cluster, execute the following command:
+
+```console
+lokoctl cluster apply
+```
+
+## Destroying
+
+To destroy the Lokomotive cluster, execute the following command:
+
+```console
+lokoctl cluster destroy --confirm
+```
diff --git a/docs/quickstarts/aks.md b/docs/quickstarts/aks.md
new file mode 100644
index 000000000..7256fb7a2
--- /dev/null
+++ b/docs/quickstarts/aks.md
@@ -0,0 +1,197 @@
+# Lokomotive AKS quickstart guide
+
+## Contents
+
+* [Introduction](#introduction)
+* [Requirements](#requirements)
+* [Step 1: Install lokoctl](#step-1-install-lokoctl)
+* [Step 2: Set up a working directory](#step-2-set-up-a-working-directory)
+* [Step 3: Set up Azure credentials from environment variables](#step-3-set-up-azure-credentials-from-environment-variables)
+* [Step 4: Prepare AKS credentials](#step-4-prepare-aks-credentials)
+* [Step 5: Define cluster configuration](#step-5-define-cluster-configuration)
+* [Step 6: Create Lokomotive cluster](#step-6-create-lokomotive-cluster)
+* [Verification](#verification)
+* [Using the cluster](#using-the-cluster)
+* [Cleanup](#cleanup)
+* [Conclusion](#conclusion)
+* [Next steps](#next-steps)
+
+## Introduction
+
+This quickstart guide walks through the steps needed to create a Lokomotive cluster on AKS.
+
+By the end of this guide, you'll have a production-ready Kubernetes cluster running on Azure AKS.
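+
+For orientation, the cluster definition this guide ends up with looks roughly like the sketch below. All values are
+placeholders; the full production configuration used in this guide lives in
+[`examples/aks-production`](../../examples/aks-production).
+
+```tf
+cluster "aks" {
+  asset_dir    = "./lokomotive-assets"
+  cluster_name = "myakscluster"
+
+  # Service principal credentials. Alternatively, set `application_name`
+  # to have lokoctl create the Azure AD application for you.
+  subscription_id = "azure-subscription-id"
+  tenant_id       = "azure-tenant-id"
+  client_id       = "service-principal-client-id"
+  client_secret   = "service-principal-client-secret"
+
+  resource_group_name = "myakscluster"
+
+  worker_pool "default" {
+    vm_size = "Standard_D2_v2"
+    count   = 2
+  }
+}
+```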
+
+_Note: Lokomotive on AKS currently provides Kubernetes 1.16, as opposed to other platforms, which provide 1.18. This is due to
+limitations of the Azure platform._
+
+## Requirements
+
+* Basic understanding of Kubernetes concepts.
+* [Azure account](https://azure.microsoft.com/en-us/free/).
+* `kubectl` installed locally to access the Kubernetes cluster.
+
+## Steps
+
+### Step 1: Install lokoctl
+
+lokoctl is a command-line interface for Lokomotive.
+
+To install `lokoctl`, follow the instructions in the [lokoctl installation](../installer/lokoctl.md)
+guide.
+
+### Step 2: Set up a working directory
+
+It's better to start fresh in a new working directory, as the state of the cluster is stored in this
+directory.
+
+This also makes the cleanup task easier.
+
+```console
+mkdir -p lokomotive-infra/myakscluster
+cd lokomotive-infra/myakscluster
+```
+
+### Step 3: Set up Azure credentials from environment variables
+
+To create an AKS resource in Azure, you need to authenticate to it first. Follow
+[Authenticating to Azure](https://www.terraform.io/docs/providers/azurerm/index.html#authenticating-to-azure)
+to set up the environment variables required for creating an AKS cluster.
+
+### Step 4: Prepare AKS credentials
+
+An AKS cluster requires a set of service principal credentials to run, as it talks to the Azure API to create load balancers,
+disks and other objects. Depending on your level of privileges in your Azure tenant, there are different ways to provide them.
+
+#### Azure AD Application Creator (full automation)
+
+If you are an Azure AD administrator, or if your Azure user has permissions to create Azure AD applications, then
+you don't need to prepare anything manually. When configuring a cluster, set the `application_name` property
+to e.g. the cluster name, and `lokoctl` will create an Azure AD application for you, together with the associated
+service principal and credentials. Those credentials will be automatically used for running AKS.
+
+#### Subscription collaborator
+
+If you have full administrative access to your subscription but no permissions to create Azure AD applications, you need to
+ask your administrator to create an Azure AD application for you and provide you with a Service Principal Client ID and a
+Client Secret, which will be used by the AKS cluster.
+
+#### Resource group collaborator
+
+If your Azure user only has access to a single Resource Group, you must set the `manage_resource_group` property to `false`,
+as otherwise `lokoctl` will try to create a Resource Group for you.
+
+You also need Service Principal credentials, as explained in [Subscription collaborator](#subscription-collaborator).
+
+### Step 5: Define cluster configuration
+
+To create a Lokomotive cluster, we need to define a configuration.
+
+A [production-ready configuration](../../examples/aks-production) is already provided for ease of
+use. Copy the example configuration to the working directory and modify it accordingly.
+
+The provided configuration installs the Lokomotive cluster and the following components:
+
+* [prometheus-operator](../configuration-reference/components/prometheus-operator.md)
+* [cert-manager](../configuration-reference/components/cert-manager.md)
+* [contour](../configuration-reference/components/contour.md)
+
+You can configure the components as per your requirements.
+
+Create a variables file named `lokocfg.vars` in the working directory to set values for variables
+defined in the configuration file.
+ +```console +#lokocfg.vars +state_s3_bucket = "name-of-the-s3-bucket-to-store-the-cluster-state" +lock_dynamodb_table = "name-of-the-dynamodb-table-for-state-locking" + +cert_manager_email = "email-address-used-for-cert-manager-component" +grafana_admin_password = "password-for-grafana" +``` + +**NOTE**: You can separate component configurations from cluster configuration in separate +configuration files if doing so fits your needs. + +Example: +```console +$ ls lokomotive-infra/myakscluster +cluster.lokocfg prometheus-operator.lokocfg lokocfg.vars +``` + +For advanced cluster configurations and more information refer to the [AKS configuration +guide](../configuration-reference/platforms/aks.md). + +### Step 6: Create Lokomotive cluster + +Run the following command to create the cluster: + +```console +lokoctl cluster apply +``` +Once the command finishes, your Lokomotive cluster details are stored in the path you've specified +under `asset_dir`. + +## Verification + +A successful installation results in the output: + +```console +azurerm_kubernetes_cluster.aks: Still creating... [8m0s elapsed] +azurerm_kubernetes_cluster.aks: Still creating... [8m10s elapsed] +azurerm_kubernetes_cluster.aks: Still creating... [8m20s elapsed] +azurerm_kubernetes_cluster.aks: Creation complete after 8m24s [id=/subscriptions/55555555-4444-3333-2222-1111111111/resourcegroups/ci1586244933-fg/providers/Microsoft.ContainerService/managedClusters/ci1586244933-fg] +local_file.kubeconfig: Creating... +local_file.kubeconfig: Creation complete after 0s [id=f96468e341a652192af7508836430241e6f49df1] + +Apply complete! Resources: 3 added, 0 changed, 0 destroyed. + +Outputs: + +initialized = true + +Your configurations are stored in /root/lokoctl-assets + +Now checking health and readiness of the cluster nodes ... + +Node Ready Reason Message + +aks-default-31666422-vmss000000 True KubeletReady kubelet is posting ready status. AppArmor enabled +aks-default-31666422-vmss000001 True KubeletReady kubelet is posting ready status. AppArmor enabled + +Success - cluster is healthy and nodes are ready! +``` + +Use the generated `kubeconfig` file to access the Kubernetes cluster and list nodes. + +```console +export KUBECONFIG=./lokomotive-assets/cluster-assets/auth/kubeconfig +kubectl get nodes +``` + +## Using the cluster + +At this point you have access to the Kubernetes cluster and can use it! +If you don't have Kubernetes experience you can check out the [Kubernetes +Basics official +documentation](https://kubernetes.io/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro/) +to learn about its usage. + +## Cleanup + +To destroy the Lokomotive cluster, execute the following command: + +```console +lokoctl cluster destroy --confirm +``` + +You can safely delete the working directory created for this quickstart guide if you no longer +require it. + +## Conclusion + +After walking through this guide, you've learned how to set up a Lokomotive cluster on AKS. + +## Next steps + +You can now start deploying your workloads on the cluster. + +For more information on installing supported Lokomotive components, you can visit the [component +configuration references](../configuration-reference/components). 
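+
+The Azure credentials described in Step 4 can also be supplied to `lokoctl` through environment variables instead of being
+stored in the configuration, as documented in the [AKS configuration reference](../configuration-reference/platforms/aks.md).
+A minimal sketch, with placeholder values:
+
+```console
+export LOKOMOTIVE_AKS_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
+export LOKOMOTIVE_AKS_TENANT_ID="00000000-0000-0000-0000-000000000000"
+export LOKOMOTIVE_AKS_CLIENT_ID="00000000-0000-0000-0000-000000000000"
+export LOKOMOTIVE_AKS_CLIENT_SECRET="service-principal-client-secret"
+```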
diff --git a/examples/aks-production/cluster.lokocfg b/examples/aks-production/cluster.lokocfg new file mode 100644 index 000000000..3aa711d7d --- /dev/null +++ b/examples/aks-production/cluster.lokocfg @@ -0,0 +1,95 @@ +variable "subscription_id" {} +variable "tenant_id" {} +variable "grafana_admin_password" {} +variable "resource_group_name" {} +variable "cert_manager_email" {} +variable "state_s3_bucket" {} +variable "lock_dynamodb_table" {} + +variable "manage_resource_group" { + default = true +} + +variable "application_name" { + default = "" +} + +variable "asset_dir" { + default = "./lokomotive-assets" +} + +variable "cluster_name" { + default = "lokomotive-cluster" +} + +variable "workers_count" { + default = 3 +} + +variable "workers_type" { + default = "Standard_D2_v2" +} + +variable "location" { + default = "West Europe" +} + +variable "state_s3_key" { + default = "lokomotive/terraform.tfstate" +} + +variable "state_s3_region" { + default = "eu-central-1" +} + +backend "s3" { + bucket = var.state_s3_bucket + key = var.state_s3_key + region = var.state_s3_region + dynamodb_table = var.lock_dynamodb_table +} + +cluster "aks" { + asset_dir = pathexpand(var.asset_dir) + cluster_name = var.cluster_name + + subscription_id = var.subscription_id + tenant_id = var.tenant_id + location = var.location + resource_group_name = var.resource_group_name + application_name = var.application_name + manage_resource_group = var.manage_resource_group + + worker_pool "default" { + vm_size = var.workers_type + count = var.workers_count + } +} + +component "prometheus-operator" { + grafana_admin_password = var.grafana_admin_password + disable_webhooks = true + + monitor { + etcd = false + kube_controller_manager = false + kube_scheduler = false + kube_proxy = false + kubelet = false + } + + coredns { + selector = { + "k8s-app" = "kube-dns", + } + } +} + +component "cert-manager" { + email = var.cert_manager_email + service_monitor = true +} + +component "contour" { + service_monitor = true +} diff --git a/examples/aks-testing/cluster.lokocfg b/examples/aks-testing/cluster.lokocfg new file mode 100644 index 000000000..22d3b64d7 --- /dev/null +++ b/examples/aks-testing/cluster.lokocfg @@ -0,0 +1,78 @@ +variable "subscription_id" {} +variable "tenant_id" {} +variable "grafana_admin_password" {} +variable "resource_group_name" {} +variable "cert_manager_email" {} + +variable "manage_resource_group" { + default = true +} + +variable "application_name" { + default = "" +} + +variable "asset_dir" { + default = "./lokomotive-assets" +} + +variable "cluster_name" { + default = "lokomotive-cluster" +} + +variable "workers_count" { + default = 1 +} + +variable "workers_type" { + default = "Standard_D2_v2" +} + +variable "location" { + default = "West Europe" +} + +cluster "aks" { + asset_dir = pathexpand(var.asset_dir) + cluster_name = var.cluster_name + + subscription_id = var.subscription_id + tenant_id = var.tenant_id + location = var.location + resource_group_name = var.resource_group_name + application_name = var.application_name + manage_resource_group = var.manage_resource_group + + worker_pool "default" { + vm_size = var.workers_type + count = var.workers_count + } +} + +component "prometheus-operator" { + grafana_admin_password = var.grafana_admin_password + disable_webhooks = true + + monitor { + etcd = false + kube_controller_manager = false + kube_scheduler = false + kube_proxy = false + kubelet = false + } + + coredns { + selector = { + "k8s-app" = "kube-dns", + } + } +} + +component "cert-manager" 
{ + email = var.cert_manager_email + service_monitor = true +} + +component "contour" { + service_monitor = true +} diff --git a/pkg/platform/aks/aks.go b/pkg/platform/aks/aks.go new file mode 100644 index 000000000..38042c72f --- /dev/null +++ b/pkg/platform/aks/aks.go @@ -0,0 +1,346 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package aks is a Platform implementation for creating a Kubernetes cluster using +// Azure AKS. +package aks + +import ( + "fmt" + "os" + "path/filepath" + "text/template" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/mitchellh/go-homedir" + + "github.com/kinvolk/lokomotive/pkg/platform" + "github.com/kinvolk/lokomotive/pkg/platform/util" + "github.com/kinvolk/lokomotive/pkg/terraform" +) + +// workerPool defines "worker_pool" block. +type workerPool struct { + // Label field. + Name string `hcl:"name,label"` + + // Block properties. + Count int `hcl:"count,optional"` + VMSize string `hcl:"vm_size,optional"` + Labels map[string]string `hcl:"labels,optional"` + Taints []string `hcl:"taints,optional"` +} + +// config defines "cluster" block for AKS. +type config struct { + AssetDir string `hcl:"asset_dir,optional"` + ClusterName string `hcl:"cluster_name,optional"` + Tags map[string]string `hcl:"tags,optional"` + + // Azure specific. + TenantID string `hcl:"tenant_id,optional"` + SubscriptionID string `hcl:"subscription_id,optional"` + ClientID string `hcl:"client_id,optional"` + ClientSecret string `hcl:"client_secret,optional"` + + Location string `hcl:"location,optional"` + + // ApplicationName for created service principal. + ApplicationName string `hcl:"application_name,optional"` + + ResourceGroupName string `hcl:"resource_group_name,optional"` + ManageResourceGroup bool `hcl:"manage_resource_group,optional"` + + WorkerPools []workerPool `hcl:"worker_pool,block"` +} + +const ( + name = "aks" + + // Environment variables used to load sensitive parts of the configuration. + clientIDEnv = "LOKOMOTIVE_AKS_CLIENT_ID" + clientSecretEnv = "LOKOMOTIVE_AKS_CLIENT_SECRET" // #nosec G101 + subscriptionIDEnv = "LOKOMOTIVE_AKS_SUBSCRIPTION_ID" + tenantIDEnv = "LOKOMOTIVE_AKS_TENANT_ID" +) + +// init registers AKS as a platform. +func init() { //nolint:gochecknoinits + c := &config{ + Location: "West Europe", + ManageResourceGroup: true, + } + + platform.Register(name, c) +} + +// LoadConfig loads configuration values into the config struct from given HCL configuration. +func (c *config) LoadConfig(configBody *hcl.Body, evalContext *hcl.EvalContext) hcl.Diagnostics { + if configBody == nil { + emptyConfig := hcl.EmptyBody() + configBody = &emptyConfig + } + + if d := gohcl.DecodeBody(*configBody, evalContext, c); d.HasErrors() { + return d + } + + return c.checkValidConfig() +} + +// checkValidConfig validates cluster configuration. +func (c *config) checkValidConfig() hcl.Diagnostics { + var d hcl.Diagnostics + + d = append(d, c.checkNotEmptyWorkers()...) 
+	d = append(d, c.checkWorkerPoolNamesUnique()...)
+	d = append(d, c.checkWorkerPools()...)
+	d = append(d, c.checkCredentials()...)
+	d = append(d, c.checkRequiredFields()...)
+
+	return d
+}
+
+// checkWorkerPools validates all configured worker pool fields.
+func (c *config) checkWorkerPools() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	for _, w := range c.WorkerPools {
+		if w.VMSize == "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("pool %q: VMSize field can't be empty", w.Name),
+			})
+		}
+
+		if w.Count <= 0 {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("pool %q: count must be bigger than 0", w.Name),
+			})
+		}
+	}
+
+	return d
+}
+
+// checkRequiredFields checks that all required fields are populated in the top-level configuration.
+func (c *config) checkRequiredFields() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	if c.SubscriptionID == "" && os.Getenv(subscriptionIDEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure subscription ID",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable "+
+				"is not defined. At least one of these should be defined",
+				"SubscriptionID", subscriptionIDEnv),
+		})
+	}
+
+	if c.TenantID == "" && os.Getenv(tenantIDEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure tenant ID",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable "+
+				"is not defined. At least one of these should be defined", "TenantID", tenantIDEnv),
+		})
+	}
+
+	f := map[string]string{
+		"AssetDir":          c.AssetDir,
+		"ClusterName":       c.ClusterName,
+		"ResourceGroupName": c.ResourceGroupName,
+	}
+
+	for k, v := range f {
+		if v == "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("field %q can't be empty", k),
+			})
+		}
+	}
+
+	return d
+}
+
+// checkCredentials checks if credentials are correctly defined.
+func (c *config) checkCredentials() hcl.Diagnostics {
+	var d hcl.Diagnostics
+
+	// If the application name is defined, we assume that we work as a highly privileged
+	// account which has permissions to create a new Azure AD application, so the Client ID
+	// and Client Secret fields are not needed.
+	if c.ApplicationName != "" {
+		if c.ClientID != "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "ClientID and ApplicationName are mutually exclusive",
+			})
+		}
+
+		if c.ClientSecret != "" {
+			d = append(d, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "ClientSecret and ApplicationName are mutually exclusive",
+			})
+		}
+
+		return d
+	}
+
+	if c.ClientSecret == "" && os.Getenv(clientSecretEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure client secret",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable "+
+				"is not defined. At least one of these should be defined", "ClientSecret", clientSecretEnv),
+		})
+	}
+
+	if c.ClientID == "" && os.Getenv(clientIDEnv) == "" {
+		d = append(d, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "cannot find the Azure client ID",
+			Detail: fmt.Sprintf("%q field is empty and %q environment variable is "+
+				"not defined. At least one of these should be defined", "ClientID", clientIDEnv),
+		})
+	}
+
+	return d
+}
+
+// checkNotEmptyWorkers checks if the cluster has at least 1 worker pool defined.
+func (c *config) checkNotEmptyWorkers() hcl.Diagnostics {
+	var diagnostics hcl.Diagnostics
+
+	if len(c.WorkerPools) == 0 {
+		diagnostics = append(diagnostics, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "At least one worker pool must be defined",
+			Detail:   "Make sure to define at least one worker pool block in your cluster block",
+		})
+	}
+
+	return diagnostics
+}
+
+// checkWorkerPoolNamesUnique verifies that all worker pool names are unique.
+func (c *config) checkWorkerPoolNamesUnique() hcl.Diagnostics {
+	var diagnostics hcl.Diagnostics
+
+	dup := make(map[string]bool)
+
+	for _, w := range c.WorkerPools {
+		if !dup[w.Name] {
+			dup[w.Name] = true
+			continue
+		}
+
+		// It is duplicated.
+		diagnostics = append(diagnostics, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Worker pool names should be unique",
+			Detail:   fmt.Sprintf("Worker pool '%v' is duplicated", w.Name),
+		})
+	}
+
+	return diagnostics
+}
+
+// Meta is part of the Platform interface and returns common information about the platform configuration.
+func (c *config) Meta() platform.Meta {
+	nodes := 0
+	for _, workerpool := range c.WorkerPools {
+		nodes += workerpool.Count
+	}
+
+	return platform.Meta{
+		AssetDir:      c.AssetDir,
+		ExpectedNodes: nodes,
+		Managed:       true,
+	}
+}
+
+// Apply creates AKS infrastructure via Terraform.
+func (c *config) Apply(ex *terraform.Executor) error {
+	if err := c.Initialize(ex); err != nil {
+		return err
+	}
+
+	return ex.Apply()
+}
+
+// Destroy destroys AKS infrastructure via Terraform.
+func (c *config) Destroy(ex *terraform.Executor) error {
+	if err := c.Initialize(ex); err != nil {
+		return err
+	}
+
+	return ex.Destroy()
+}
+
+// Initialize creates the Terraform files required for AKS.
+func (c *config) Initialize(ex *terraform.Executor) error {
+	assetDir, err := homedir.Expand(c.AssetDir)
+	if err != nil {
+		return err
+	}
+
+	terraformRootDir := terraform.GetTerraformRootDir(assetDir)
+
+	return createTerraformConfigFile(c, terraformRootDir)
+}
+
+// createTerraformConfigFile creates a Terraform config file in the given directory.
+func createTerraformConfigFile(cfg *config, terraformRootDir string) error { + t := template.Must(template.New("t").Parse(terraformConfigTmpl)) + + path := filepath.Join(terraformRootDir, "cluster.tf") + + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("failed to create file %q: %w", path, err) + } + + util.AppendTags(&cfg.Tags) + + if cfg.ClientSecret == "" { + cfg.ClientSecret = os.Getenv(clientSecretEnv) + } + + if cfg.SubscriptionID == "" { + cfg.SubscriptionID = os.Getenv(subscriptionIDEnv) + } + + if cfg.ClientID == "" { + cfg.ClientID = os.Getenv(clientIDEnv) + } + + if cfg.TenantID == "" { + cfg.TenantID = os.Getenv(tenantIDEnv) + } + + if err := t.Execute(f, cfg); err != nil { + return fmt.Errorf("failed to write template to file %q: %w", path, err) + } + + if err := f.Close(); err != nil { + return fmt.Errorf("failed closing file %q: %w", path, err) + } + + return nil +} diff --git a/pkg/platform/aks/aks_test.go b/pkg/platform/aks/aks_test.go new file mode 100644 index 000000000..eca21dec2 --- /dev/null +++ b/pkg/platform/aks/aks_test.go @@ -0,0 +1,315 @@ +package aks + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" + + lokoconfig "github.com/kinvolk/lokomotive/pkg/config" +) + +const ( + testWorkerCount = 1 +) + +// createTerraformConfigFile() +func TestCreateTerraformConfigFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "lokoctl-tests-") + if err != nil { + t.Fatalf("creating tmp dir should succeed, got: %v", err) + } + + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + t.Logf("failed to remove temp dir %q: %v", tmpDir, err) + } + }() + + c := &config{ + WorkerPools: []workerPool{ + { + Name: "foo", + VMSize: "bar", + Count: testWorkerCount, + }, + }, + } + + if err := createTerraformConfigFile(c, tmpDir); err != nil { + t.Fatalf("creating Terraform config files should succeed, got: %v", err) + } +} + +func TestCreateTerraformConfigFileNoWorkerPools(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "lokoctl-tests-") + if err != nil { + t.Fatalf("creating tmp dir should succeed, got: %v", err) + } + + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + t.Logf("failed to remove temp dir %q: %v", tmpDir, err) + } + }() + + c := &config{} + + if err := createTerraformConfigFile(c, tmpDir); err == nil { + t.Fatalf("creating Terraform config files should fail if there is no worker pools defined") + } +} + +func TestCreateTerraformConfigFileNonExistingPath(t *testing.T) { + c := &config{} + + if err := createTerraformConfigFile(c, "/nonexisting"); err == nil { + t.Fatalf("creating Terraform config files in non-existing path should fail") + } +} + +// Meta() +func TestMeta(t *testing.T) { + assetDir := "foo" + + moreWorkers := 3 + + c := &config{ + AssetDir: assetDir, + WorkerPools: []workerPool{ + { + Count: testWorkerCount, + }, + { + Count: moreWorkers, + }, + }, + } + + expectedNodes := 4 + if e := c.Meta().ExpectedNodes; e != expectedNodes { + t.Errorf("Meta should count workers from all pools. Expected %d, got %d", expectedNodes, e) + } + + if a := c.Meta().AssetDir; a != assetDir { + t.Errorf("Meta should return configured asset dir. 
Expected %q, got %q", assetDir, a) + } +} + +// checkWorkerPoolNamesUnique() +func TestCheckWorkerPoolNamesUniqueDuplicated(t *testing.T) { + c := &config{ + WorkerPools: []workerPool{ + { + Name: "foo", + }, + { + Name: "foo", + }, + }, + } + + if d := c.checkWorkerPoolNamesUnique(); !d.HasErrors() { + t.Fatalf("should return error when worker pools are duplicated") + } +} + +func TestCheckWorkerPoolNamesUnique(t *testing.T) { + c := &config{ + WorkerPools: []workerPool{ + { + Name: "foo", + }, + { + Name: "bar", + }, + }, + } + + if d := c.checkWorkerPoolNamesUnique(); d.HasErrors() { + t.Fatalf("should not return errors when pool names are unique, got: %v", d) + } +} + +// checkNotEmptyWorkers() +func TestNotEmptyWorkersEmpty(t *testing.T) { + c := &config{} + + if d := c.checkNotEmptyWorkers(); !d.HasErrors() { + t.Fatalf("should return error when there is no worker pool defined") + } +} + +func TestNotEmptyWorkers(t *testing.T) { + c := &config{ + WorkerPools: []workerPool{ + { + Name: "foo", + }, + }, + } + + if d := c.checkNotEmptyWorkers(); d.HasErrors() { + t.Fatalf("should not return errors when worker pool is not empty, got: %v", d) + } +} + +// checkConfiguration() +func TestCheckWorkerPoolNamesUniqueTest(t *testing.T) { + c := &config{ + WorkerPools: []workerPool{ + { + Name: "foo", + }, + { + Name: "bar", + }, + }, + } + + if d := c.checkWorkerPoolNamesUnique(); d.HasErrors() { + t.Fatalf("should not return errors when pool names are unique, got: %v", d) + } +} + +// checkCredentials() +func TestCheckCredentialsAppNameAndClientID(t *testing.T) { + c := &config{ + ApplicationName: "foo", + ClientID: "foo", + } + + if d := c.checkCredentials(); !d.HasErrors() { + t.Fatalf("should give error if both ApplicationName and ClientID fields are defined") + } +} + +func TestCheckCredentialsAppNameAndClientSecret(t *testing.T) { + c := &config{ + ApplicationName: "foo", + ClientSecret: "foo", + } + + if d := c.checkCredentials(); !d.HasErrors() { + t.Fatalf("should give error if both ApplicationName and ClientID fields are defined") + } +} + +func TestCheckCredentialsAppNameClientIDAndClientSecret(t *testing.T) { + c := &config{ + ApplicationName: "foo", + ClientID: "foo", + ClientSecret: "foo", + } + + expectedErrorCount := 2 + + if d := c.checkCredentials(); len(d) != expectedErrorCount { + t.Fatalf("should give errors for both conflicting ClientID and ClientSecret, got %v", d) + } +} + +func TestCheckCredentialsRequireSome(t *testing.T) { + c := &config{} + + if d := c.checkCredentials(); !d.HasErrors() { + t.Fatalf("should give error if both ApplicationName and ClientID fields are empty") + } +} + +func TestCheckCredentialsRequireClientIDWithClientSecret(t *testing.T) { + c := &config{ + ClientSecret: "foo", + } + + if d := c.checkCredentials(); !d.HasErrors() { + t.Fatalf("should give error if ClientSecret is defined and ClientID is empty") + } +} + +func TestCheckCredentialsReadClientSecretFromEnvironment(t *testing.T) { + if err := os.Setenv(clientSecretEnv, "1"); err != nil { + t.Fatalf("failed to set environment variable %q: %v", clientSecretEnv, err) + } + + defer func() { + if err := os.Setenv(clientSecretEnv, ""); err != nil { + t.Logf("failed unsetting environment variable %q: %v", clientSecretEnv, err) + } + }() + + c := &config{ + ClientID: "foo", + } + + if d := c.checkCredentials(); d.HasErrors() { + t.Fatalf("should read client secret from environment") + } +} + +// LoadConfig() +func loadConfigFromString(t *testing.T, c string) hcl.Diagnostics { + p := 
hclparse.NewParser() + + f, d := p.ParseHCL([]byte(c), "x.lokocfg") + if d.HasErrors() { + t.Fatalf("parsing HCL should succeed, got: %v", d) + } + + configBody := hcl.MergeFiles([]*hcl.File{f}) + + var rootConfig lokoconfig.RootConfig + + if d := gohcl.DecodeBody(configBody, nil, &rootConfig); d.HasErrors() { + t.Fatalf("decoding root config should succeed, got: %v", d) + } + + cc := &config{} + + return cc.LoadConfig(&rootConfig.Cluster.Config, &hcl.EvalContext{}) +} + +func TestLoadConfig(t *testing.T) { + c := ` +cluster "aks" { + asset_dir = "/fooo" + client_id = "bar" + client_secret = "foo" + cluster_name = "mycluster" + resource_group_name = "test" + subscription_id = "foo" + tenant_id = "bar" + + worker_pool "foo" { + count = 1 + vm_size = "foo" + } +} +` + if d := loadConfigFromString(t, c); d.HasErrors() { + t.Fatalf("valid config should not return error, got: %v", d) + } +} + +func TestLoadConfigEmpty(t *testing.T) { + c := &config{} + + if d := c.LoadConfig(nil, &hcl.EvalContext{}); !d.HasErrors() { + t.Fatalf("empty config should return error, got: %v", d) + } +} + +func TestLoadConfigBadHCL(t *testing.T) { + c := ` +cluster "aks" { + not_defined_field = "doh" +} +` + + if d := loadConfigFromString(t, c); !d.HasErrors() { + t.Fatalf("invalid HCL should return errors") + } +} diff --git a/pkg/platform/aks/template.go b/pkg/platform/aks/template.go new file mode 100644 index 000000000..49755ecd6 --- /dev/null +++ b/pkg/platform/aks/template.go @@ -0,0 +1,225 @@ +// Copyright 2020 The Lokomotive Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aks + +var terraformConfigTmpl = `{{- define "resource_group_name" -}} +{{- if .ManageResourceGroup -}} +azurerm_resource_group.aks.name +{{- else -}} +"{{ .ResourceGroupName }}" +{{- end -}} +{{- end -}} + +{{- define "client_id" -}} +{{- if .ApplicationName -}} +azuread_application.aks.application_id +{{- else -}} +"{{ .ClientID }}" +{{- end -}} +{{- end -}} + +{{- define "client_secret" -}} +{{- if .ApplicationName -}} +azuread_application_password.aks.value +{{- else -}} +"{{ .ClientSecret }}" +{{- end -}} +{{- end -}} + +locals { + subscription_id = "{{ .SubscriptionID }}" + tenant_id = "{{ .TenantID }}" + application_name = "{{ .ApplicationName }}" + location = "{{ .Location }}" + resource_group_name = {{ template "resource_group_name" . }} + kubernetes_version = "1.16.7" + cluster_name = "{{ .ClusterName }}" + default_node_pool_name = "{{ (index .WorkerPools 0).Name }}" + default_node_pool_vm_size = "{{ (index .WorkerPools 0).VMSize }}" + default_node_pool_count = {{ (index .WorkerPools 0).Count }} + client_id = {{ template "client_id" . }} + client_secret = {{ template "client_secret" . 
}} +} + +provider "azurerm" { + version = "2.2.0" + + # https://github.com/terraform-providers/terraform-provider-azurerm/issues/5893 + features {} +} + +provider "local" { + version = "1.4.0" +} + +{{- if .ApplicationName }} +provider "azuread" { + version = "0.8.0" +} + +provider "random" { + version = "2.2.1" +} + +resource "azuread_application" "aks" { + name = local.application_name +} + +resource "azuread_service_principal" "aks" { + application_id = azuread_application.aks.application_id + + {{- if .Tags }} + tags = [ + {{- range $k, $v := .Tags }} + "{{ $k }}={{ $v }}", + {{- end }} + ] + {{- end }} +} + +resource "random_string" "password" { + length = 16 + special = true + + override_special = "/@\" " +} + +resource "azuread_application_password" "aks" { + application_object_id = azuread_application.aks.object_id + value = random_string.password.result + end_date_relative = "86000h" +} + +resource "azurerm_role_assignment" "aks" { + scope = "/subscriptions/${local.subscription_id}" + role_definition_name = "Contributor" + principal_id = azuread_service_principal.aks.id +} +{{- end }} + +{{- if .ManageResourceGroup }} +resource "azurerm_resource_group" "aks" { + name = "{{ .ResourceGroupName }}" + location = local.location + + {{- if .Tags }} + tags = { + {{- range $k, $v := .Tags }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} +} +{{- end }} + +resource "azurerm_kubernetes_cluster" "aks" { + name = local.cluster_name + location = local.location + resource_group_name = local.resource_group_name + kubernetes_version = local.kubernetes_version + dns_prefix = local.cluster_name + + default_node_pool { + name = local.default_node_pool_name + vm_size = local.default_node_pool_vm_size + node_count = local.default_node_pool_count + + {{- if (index .WorkerPools 0).Labels }} + node_labels = { + {{- range $k, $v := (index .WorkerPools 0).Labels }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} + + {{- if (index .WorkerPools 0).Taints }} + node_taints = [ + {{- range (index .WorkerPools 0).Taints }} + "{{ . }}", + {{- end }} + ] + {{- end }} + } + + role_based_access_control { + enabled = true + } + + service_principal { + client_id = local.client_id + client_secret = local.client_secret + } + + network_profile { + network_plugin = "kubenet" + network_policy = "calico" + } + + {{- if .Tags }} + tags = { + {{- range $k, $v := .Tags }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} +} + +{{ range $index, $pool := (slice .WorkerPools 1) }} +resource "azurerm_kubernetes_cluster_node_pool" "worker-{{ $pool.Name }}" { + name = "{{ $pool.Name }}" + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id + vm_size = "{{ $pool.VMSize }}" + node_count = "{{ $pool.Count }}" + + {{- if $pool.Labels }} + node_labels = { + {{- range $k, $v := $pool.Labels }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} + + + {{- if $pool.Taints }} + node_taints = [ + {{- range $pool.Taints }} + "{{ . }}", + {{- end }} + ] + {{- end }} + + + {{- if $.Tags }} + tags = { + {{- range $k, $v := $.Tags }} + "{{ $k }}" = "{{ $v }}" + {{- end }} + } + {{- end }} +} +{{- end }} + +resource "local_file" "kubeconfig" { + sensitive_content = azurerm_kubernetes_cluster.aks.kube_config_raw + filename = "../cluster-assets/auth/kubeconfig" +} + +# Stub output which indicates, that Terraform ran at least once. +# Used when checking, if we should ask user for confirmation when +# applying changes to the cluster. 
+output "initialized" { + value = true +} +` diff --git a/test/components/cert-manager/cert-manager_test.go b/test/components/cert-manager/cert-manager_test.go index ff53179c6..3d5388179 100644 --- a/test/components/cert-manager/cert-manager_test.go +++ b/test/components/cert-manager/cert-manager_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package certmanager diff --git a/test/components/contour/contour_test.go b/test/components/contour/contour_test.go index c4546aeb2..7b2545ef6 100644 --- a/test/components/contour/contour_test.go +++ b/test/components/contour/contour_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package contour diff --git a/test/components/external-dns/external-dns_test.go b/test/components/external-dns/external-dns_test.go index 7c1b2b8d7..b57d3da9f 100644 --- a/test/components/external-dns/external-dns_test.go +++ b/test/components/external-dns/external-dns_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package externaldns diff --git a/test/components/httpbin/httpbin_test.go b/test/components/httpbin/httpbin_test.go index 985eb7f1c..2aa179bcb 100644 --- a/test/components/httpbin/httpbin_test.go +++ b/test/components/httpbin/httpbin_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package httpbin diff --git a/test/components/prometheus-operator/prometheus_operator_test.go b/test/components/prometheus-operator/prometheus_operator_test.go index 2317394a1..cdee0cff6 100644 --- a/test/components/prometheus-operator/prometheus_operator_test.go +++ b/test/components/prometheus-operator/prometheus_operator_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build e2e package prometheusoperator diff --git a/test/components/util/util.go b/test/components/util/util.go index 19633a0c0..c052cab84 100644 --- a/test/components/util/util.go +++ b/test/components/util/util.go @@ -337,6 +337,9 @@ const ( // PlatformBaremetal is for Baremetal PlatformBaremetal = "baremetal" + + // PlatformAKS is for AKS. + PlatformAKS = "aks" ) // IsPlatformSupported takes in the test object and the list of supported platforms. The function diff --git a/test/monitoring/components_alerts_test.go b/test/monitoring/components_alerts_test.go index 47351b8d6..35bfad2ef 100644 --- a/test/monitoring/components_alerts_test.go +++ b/test/monitoring/components_alerts_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build aws packet +// +build aws packet aks // +build poste2e package monitoring diff --git a/test/monitoring/components_metrics_test.go b/test/monitoring/components_metrics_test.go index ea2c8ecf7..e27cd721d 100644 --- a/test/monitoring/components_metrics_test.go +++ b/test/monitoring/components_metrics_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build poste2e package monitoring @@ -31,6 +31,11 @@ import ( //nolint:funlen func testComponentsPrometheusMetrics(t *testing.T, v1api v1.API) { + selfHostedPlatforms := []testutil.Platform{ + testutil.PlatformPacket, + testutil.PlatformAWS, + } + testCases := []struct { componentName string query string @@ -47,18 +52,22 @@ func testComponentsPrometheusMetrics(t *testing.T, v1api v1.API) { { componentName: "kube-scheduler", query: "scheduler_schedule_attempts_total", + platforms: selfHostedPlatforms, }, { componentName: "kube-controller-manager", query: "workqueue_work_duration_seconds_bucket", + platforms: selfHostedPlatforms, }, { componentName: "kube-proxy", query: "kubeproxy_sync_proxy_rules_duration_seconds_bucket", + platforms: selfHostedPlatforms, }, { componentName: "kubelet", query: "kubelet_running_pod_count", + platforms: selfHostedPlatforms, }, { componentName: "metallb", @@ -68,12 +77,12 @@ func testComponentsPrometheusMetrics(t *testing.T, v1api v1.API) { { componentName: "contour", query: "contour_dagrebuild_timestamp", - platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS}, + platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS, testutil.PlatformAKS}, }, { componentName: "cert-manager", query: "certmanager_controller_sync_call_count", - platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS}, + platforms: []testutil.Platform{testutil.PlatformPacket, testutil.PlatformAWS, testutil.PlatformAKS}, }, } diff --git a/test/monitoring/monitoring_test.go b/test/monitoring/monitoring_test.go index 1f004bdb6..4ca5d61d8 100644 --- a/test/monitoring/monitoring_test.go +++ b/test/monitoring/monitoring_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build poste2e package monitoring diff --git a/test/monitoring/scrape_targets_test.go b/test/monitoring/scrape_targets_test.go index 9f15b5b5f..9f74096fe 100644 --- a/test/monitoring/scrape_targets_test.go +++ b/test/monitoring/scrape_targets_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aws packet +// +build aws packet aks // +build poste2e package monitoring