Add AKS platform support
Refs #215 #216

Signed-off-by: Mateusz Gozdek <mateusz@kinvolk.io>
invidian committed Mar 27, 2020
1 parent 8a73c24 commit fde514d
Showing 5 changed files with 544 additions and 0 deletions.
116 changes: 116 additions & 0 deletions ci/aks/aks-cluster.lokocfg.envsubst
@@ -0,0 +1,116 @@
variable "subscription_id" {
default = "$AZURE_SUBSCRIPTION_ID"
}

variable "tenant_id" {
default = "$AZURE_TENANT_ID"
}

variable "grafana_admin_password" {
default = "admin"
}

variable "resource_group_name" {
default = var.cluster_name
}

variable "application_name" {
default = var.cluster_name
}

variable "cert_manager_email" {
default = "$EMAIL"
}

variable "asset_dir" {
default = "~/lokoctl-assets"
}

variable "cluster_name" {
default = "$CLUSTER_ID"
}

variable "workers_count" {
default = 2
}

variable "workers_type" {
default = "Standard_D2_v2"
}

variable "location" {
default = "Germany West Central"
}

variable "worker_labels" {
default = {
"testing.io" = "yes",
"roleofnode" = "testing",
}
}

variable "aws_zone_id" {
default = "$AWS_DNS_ZONE_ID"
}

variable "aws_access_key_id" {
default = "$AWS_ACCESS_KEY_ID"
}

variable "aws_secret_access_key" {
default = "$AWS_SECRET_ACCESS_KEY"
}

cluster "aks" {
asset_dir = pathexpand(var.asset_dir)
cluster_name = var.cluster_name

subscription_id = var.subscription_id
tenant_id = var.tenant_id
location = var.location
resource_group_name = var.resource_group_name
application_name = var.application_name

worker_pool "default" {
vm_size = var.workers_type
count = var.workers_count
worker_labels = var.worker_labels
}
}

component "openebs-operator" {}

component "prometheus-operator" {
grafana_admin_password = var.grafana_admin_password
disable_webhooks = true
}

component "contour" {
ingress_hosts = [
"httpbin.${var.cluster_name}.${var.aws_dns_zone}",
]
service_monitor = true
}

component "cert-manager" {
email = var.cert_manager_email
service_monitor = true
}

component "external-dns" {
policy = "sync"
owner_id = var.cluster_name
aws {
zone_id = var.aws_zone_id
aws_access_key_id = var.aws_access_key_id
aws_secret_access_key = var.aws_secret_access_key
}

service_monitor = true
}

component "rook" {}

component "httpbin" {
ingress_host = "httpbin.${var.cluster_name}.${var.aws_dns_zone}"
}
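
The .envsubst suffix indicates that this file is a template: the $AZURE_SUBSCRIPTION_ID-style defaults are substituted from the CI environment before lokoctl reads the configuration. A minimal sketch of such a rendering step in Go, assuming os.ExpandEnv (which performs the same $VAR substitution as the envsubst utility) and a hypothetical output path:

package main

import (
	"log"
	"os"
)

func main() {
	// Read the template committed in this change.
	data, err := os.ReadFile("ci/aks/aks-cluster.lokocfg.envsubst")
	if err != nil {
		log.Fatal(err)
	}

	// Substitute $VAR references from the environment, e.g.
	// $AZURE_SUBSCRIPTION_ID, $CLUSTER_ID and $EMAIL.
	rendered := os.ExpandEnv(string(data))

	// Hypothetical output path; lokoctl would then read this file.
	if err := os.WriteFile("aks-cluster.lokocfg", []byte(rendered), 0o600); err != nil {
		log.Fatal(err)
	}
}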
1 change: 1 addition & 0 deletions cli/cmd/root.go
@@ -21,6 +21,7 @@ import (
	"github.com/spf13/viper"

	// Register platforms by adding an anonymous import.
	_ "github.com/kinvolk/lokomotive/pkg/platform/aks"
	_ "github.com/kinvolk/lokomotive/pkg/platform/aws"
	_ "github.com/kinvolk/lokomotive/pkg/platform/baremetal"
	_ "github.com/kinvolk/lokomotive/pkg/platform/packet"
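
The anonymous import works through package initialization: importing pkg/platform/aks for its side effects runs the init function shown below in aks.go, which hands a default-initialized config to platform.Register. The registry itself is not part of this diff; the following is a minimal sketch of what it plausibly looks like, assuming a map-based registry and the interface shape implied by the aks code (all names here are assumptions):

package platform

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
)

// Platform is assumed to look roughly like this; the aks config type
// below satisfies it.
type Platform interface {
	LoadConfig(*hcl.Body, *hcl.EvalContext) hcl.Diagnostics
}

var platforms = map[string]Platform{}

// Register stores an implementation under its name so the CLI can look
// it up from the label of a cluster "aks" { ... } block.
func Register(name string, p Platform) {
	if _, ok := platforms[name]; ok {
		panic(fmt.Sprintf("platform %q registered twice", name))
	}
	platforms[name] = p
}

// Get returns the platform registered under name, or nil if there is none.
func Get(name string) Platform {
	return platforms[name]
}

This pattern keeps the CLI decoupled from concrete platforms: adding a new one is a single blank import plus a Register call.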
56 changes: 56 additions & 0 deletions examples/aks-testing/cluster.lokocfg
@@ -0,0 +1,56 @@
variable "subscription_id" {}
variable "tenant_id" {}
variable "grafana_admin_password" {}
variable "resource_group_name" {}
variable "application_name" {}
variable "cert_manager_email" {}

variable "asset_dir" {
default = "./lokomotive-assets"
}

variable "cluster_name" {
default = "lokomotive-cluster"
}

variable "workers_count" {
default = 1
}

variable "workers_type" {
default = "Standard_D2_v2"
}

variable "location" {
default = "West Europe"
}

cluster "aks" {
asset_dir = pathexpand(var.asset_dir)
cluster_name = var.cluster_name

subscription_id = var.subscription_id
tenant_id = var.tenant_id
location = var.location
resource_group_name = var.resource_group_name
application_name = var.application_name

worker_pool "default" {
vm_size = var.workers_type
count = var.workers_count
}
}

component "prometheus-operator" {
grafana_admin_password = var.grafana_admin_password
disable_webhooks = true
}

component "cert-manager" {
email = var.cert_manager_email
service_monitor = true
}

component "contour" {
service_monitor = true
}
190 changes: 190 additions & 0 deletions pkg/platform/aks/aks.go
@@ -0,0 +1,190 @@
// Copyright 2020 The Lokomotive Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package aks is a Platform implementation for creating a Kubernetes cluster
// using Azure AKS.
package aks

import (
	"fmt"
	"os"
	"path/filepath"
	"text/template"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/mitchellh/go-homedir"

	"github.com/kinvolk/lokomotive/pkg/platform"
	"github.com/kinvolk/lokomotive/pkg/platform/util"
	"github.com/kinvolk/lokomotive/pkg/terraform"
)

type workerPool struct {
	Name   string            `hcl:"name,label"`
	VMSize string            `hcl:"vm_size"`
	Count  int               `hcl:"count"`
	Labels map[string]string `hcl:"labels,optional"`
	Taints []string          `hcl:"taints,optional"`
}

type config struct {
	AssetDir    string            `hcl:"asset_dir"`
	ClusterName string            `hcl:"cluster_name"`
	Tags        map[string]string `hcl:"tags,optional"`

	// Azure specific.
	SubscriptionID string `hcl:"subscription_id"`
	TenantID       string `hcl:"tenant_id"`
	Location       string `hcl:"location,optional"`

	// ApplicationName for the created service principal.
	ApplicationName   string `hcl:"application_name"`
	ResourceGroupName string `hcl:"resource_group_name"`

	WorkerPools []workerPool `hcl:"worker_pool,block"`
}

// init registers aks as a platform.
func init() { //nolint:gochecknoinits
	c := &config{
		Location: "West Europe",
	}

	platform.Register("aks", c)
}

func (c *config) LoadConfig(configBody *hcl.Body, evalContext *hcl.EvalContext) hcl.Diagnostics {
	if configBody == nil {
		return hcl.Diagnostics{}
	}

	if diags := gohcl.DecodeBody(*configBody, evalContext, c); len(diags) != 0 {
		return diags
	}

	return c.checkValidConfig()
}
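
// Editor's illustration, not part of this commit: exercising LoadConfig
// with a parsed HCL body. Only config and LoadConfig come from this diff;
// the sketch is assumed to live in a separate, hypothetical aks_test.go.
package aks

import (
	"testing"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

func TestLoadConfig(t *testing.T) {
	src := `
asset_dir           = "/tmp/assets"
cluster_name        = "test"
subscription_id     = "sub"
tenant_id           = "tenant"
application_name    = "test"
resource_group_name = "test"

worker_pool "default" {
  vm_size = "Standard_D2_v2"
  count   = 1

  labels = {
    "nodetype" = "testing",
  }
}
`
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "cluster.lokocfg")
	if diags.HasErrors() {
		t.Fatalf("parsing config: %v", diags)
	}

	body := f.Body
	c := &config{}

	if diags := c.LoadConfig(&body, &hcl.EvalContext{}); diags.HasErrors() {
		t.Fatalf("loading config: %v", diags)
	}
}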

// checkValidConfig validates cluster configuration.
func (c *config) checkValidConfig() hcl.Diagnostics {
	var diagnostics hcl.Diagnostics

	diagnostics = append(diagnostics, c.checkNotEmptyWorkers()...)
	diagnostics = append(diagnostics, c.checkWorkerPoolNamesUnique()...)

	return diagnostics
}

// checkNotEmptyWorkers checks that the cluster has at least one worker pool defined.
func (c *config) checkNotEmptyWorkers() hcl.Diagnostics {
	var diagnostics hcl.Diagnostics

	if len(c.WorkerPools) == 0 {
		diagnostics = append(diagnostics, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "At least one worker pool must be defined",
			Detail:   "Make sure to define at least one worker pool block in your cluster block",
		})
	}

	return diagnostics
}

// checkWorkerPoolNamesUnique verifies that all worker pool names are unique.
func (c *config) checkWorkerPoolNamesUnique() hcl.Diagnostics {
	var diagnostics hcl.Diagnostics

	dup := make(map[string]bool)

	for _, w := range c.WorkerPools {
		if !dup[w.Name] {
			dup[w.Name] = true
			continue
		}

		// It is duplicated.
		diagnostics = append(diagnostics, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Worker pool names must be unique",
			Detail:   fmt.Sprintf("Worker pool '%v' is duplicated", w.Name),
		})
	}

	return diagnostics
}
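
// Editor's illustration, not part of this commit: the duplicate-name rule
// in action. This would sit in the same hypothetical aks_test.go sketched
// above, alongside TestLoadConfig.
func TestWorkerPoolNamesMustBeUnique(t *testing.T) {
	c := &config{
		WorkerPools: []workerPool{
			{Name: "default", VMSize: "Standard_D2_v2", Count: 1},
			{Name: "default", VMSize: "Standard_D2_v2", Count: 1},
		},
	}

	if diags := c.checkWorkerPoolNamesUnique(); !diags.HasErrors() {
		t.Fatal("expected duplicated worker pool name to be rejected")
	}
}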

// Meta is part of the Platform interface and returns common information about the platform configuration.
func (c *config) Meta() platform.Meta {
	nodes := 0
	for _, workerpool := range c.WorkerPools {
		nodes += workerpool.Count
	}

	return platform.Meta{
		AssetDir:      c.AssetDir,
		ExpectedNodes: nodes,
		Managed:       true,
	}
}

func (c *config) Apply(ex *terraform.Executor) error {
	if err := c.Initialize(ex); err != nil {
		return err
	}

	return ex.Apply()
}

func (c *config) Destroy(ex *terraform.Executor) error {
	if err := c.Initialize(ex); err != nil {
		return err
	}

	return ex.Destroy()
}

func (c *config) Initialize(ex *terraform.Executor) error {
	assetDir, err := homedir.Expand(c.AssetDir)
	if err != nil {
		return err
	}

	terraformRootDir := terraform.GetTerraformRootDir(assetDir)

	return createTerraformConfigFile(c, terraformRootDir)
}

func createTerraformConfigFile(cfg *config, terraformRootDir string) error {
	t := template.Must(template.New("t").Parse(terraformConfigTmpl))

	path := filepath.Join(terraformRootDir, "cluster.tf")

	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("failed to create file %q: %w", path, err)
	}

	util.AppendTags(&cfg.Tags)

	if err := t.Execute(f, cfg); err != nil {
		return fmt.Errorf("failed to write template to file %q: %w", path, err)
	}

	if err := f.Close(); err != nil {
		return fmt.Errorf("failed closing file %q: %w", path, err)
	}

	return nil
}
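
terraformConfigTmpl is referenced above but defined in the remaining file of this commit, which is not rendered here. A hypothetical fragment, only to illustrate how such a text/template consumes the config struct: the module source and HCL attribute names are invented, while the Go field names come from the structs above.

// Hypothetical sketch only; the real template ships in the fifth changed
// file of this commit, which is not shown on this page.
var terraformConfigTmpl = `
module "aks" {
  source = "../lokomotive-kubernetes/aks"

  subscription_id     = "{{.SubscriptionID}}"
  tenant_id           = "{{.TenantID}}"
  location            = "{{.Location}}"
  resource_group_name = "{{.ResourceGroupName}}"
  application_name    = "{{.ApplicationName}}"
  cluster_name        = "{{.ClusterName}}"
{{- range .WorkerPools}}

  # Rendered once per worker_pool block: {{.Name}} ({{.Count}}x {{.VMSize}})
{{- end}}
}
`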