This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

Merge pull request #1359 from kinvolk/surajssd/add-worker-pool-region
EM: Add worker pool specific facility param
surajssd authored Mar 1, 2021
2 parents 87e9c0d + da95497 commit 7d39863
Showing 19 changed files with 374 additions and 92 deletions.
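The headline change is a per-worker-pool `facility` parameter, so individual pools can be placed in a different Equinix Metal facility than the controllers, alongside the switch from a single `node_private_cidr` to a `node_private_cidrs` list. A minimal, illustrative `lokocfg` sketch of the new knob, based on the quickstart example further down; the second facility name ("fra2") is a placeholder, not a value from this commit:

```hcl
cluster "packet" {
  asset_dir    = "./assets"
  cluster_name = "lokomotive-demo"

  dns {
    zone     = "example.com"
    provider = "route53"
  }

  facility   = "ams1"
  project_id = "89273817-4f44-4b41-9f0c-cb00bf538542"

  controller_type  = "c3.small.x86"
  controller_count = 1

  ssh_pubkeys        = ["ssh-rsa AAAA..."]
  management_cidrs   = ["0.0.0.0/0"]
  node_private_cidrs = ["10.0.0.0/8"]

  worker_pool "pool-1" {
    count     = 2
    node_type = "c3.small.x86"

    # New in this change: deploy this pool in a facility other than the
    # controllers' (placeholder value). Requires "Backend Transfer" to be
    # enabled on the Equinix Metal project.
    facility = "fra2"
  }
}
```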
@@ -15,8 +15,8 @@ module "bootkube" {
etcd_endpoints = packet_device.controllers.*.access_private_ipv4

# Select private Packet NIC by using the can-reach Calico autodetection option with the first
# host in our private CIDR.
network_ip_autodetection_method = "can-reach=${cidrhost(var.node_private_cidr, 1)}"
# controller's private IP.
network_ip_autodetection_method = "can-reach=${packet_device.controllers[0].access_private_ipv4}"

pod_cidr = var.pod_cidr
service_cidr = var.service_cidr
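With a list of private CIDRs, `cidrhost(var.node_private_cidr, 1)` no longer yields a single well-defined probe target, so Calico's IP autodetection now reaches for the first controller's private IPv4 instead. A rough sketch of what the old expression evaluated to; the CIDR is only the example value used in the CI configs:

```hcl
# cidrhost() resolves the N-th host address inside a single CIDR, which only
# makes sense while node_private_cidr is one string.
locals {
  old_probe_target = cidrhost("10.0.0.0/8", 1) # => "10.0.0.1"
}
# With node_private_cidrs being a list there is no single "first host", so the
# module now probes packet_device.controllers[0].access_private_ipv4, which is
# always one concrete address.
```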
@@ -30,11 +30,10 @@ resource "local_file" "calico_host_protection" {
}
],
management_cidrs = var.management_cidrs
cluster_cidrs = [
var.node_private_cidr,
cluster_cidrs = concat([
var.pod_cidr,
var.service_cidr
],
], var.node_private_cidrs),
})

filename = "${var.asset_dir}/charts/kube-system/calico-host-protection.yaml"
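The host-protection template now appends every node CIDR to the cluster CIDR list instead of a single value. Roughly what `cluster_cidrs` expands to, assuming placeholder pod/service CIDRs and the single `10.0.0.0/8` node CIDR used in the CI configs:

```hcl
locals {
  pod_cidr           = "10.2.0.0/16" # placeholder
  service_cidr       = "10.3.0.0/16" # placeholder
  node_private_cidrs = ["10.0.0.0/8"]

  # Same shape as the template input above:
  cluster_cidrs = concat([local.pod_cidr, local.service_cidr], local.node_private_cidrs)
  # => ["10.2.0.0/16", "10.3.0.0/16", "10.0.0.0/8"]
}
```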
@@ -134,9 +134,9 @@ variable "management_cidrs" {
type = list(string)
}

variable "node_private_cidr" {
description = "Private IPv4 CIDR of the nodes used to allow inter-node traffic"
type = string
variable "node_private_cidrs" {
description = "List of private IPv4 CIDRs of the nodes used to allow inter-node traffic"
type = list(string)
}

variable "enable_aggregation" {
8 changes: 4 additions & 4 deletions ci/packet/packet-cluster.lokocfg.envsubst
@@ -28,14 +28,14 @@ EOF

project_id = "$PACKET_PROJECT_ID"

ssh_pubkeys = ["$PUB_KEY"]
management_cidrs = ["0.0.0.0/0"]
node_private_cidr = "10.0.0.0/8"
ssh_pubkeys = ["$PUB_KEY"]
management_cidrs = ["0.0.0.0/0"]
node_private_cidrs = ["10.0.0.0/8"]

worker_pool "pool-1" {
count = 2
node_type = "c2.medium.x86"
labels = {
labels = {
"testing.io" = "yes",
"roleofnode" = "testing",
}
6 changes: 3 additions & 3 deletions ci/packet_arm/packet_arm-cluster.lokocfg.envsubst
@@ -32,9 +32,9 @@ EOF
os_channel = "alpha"
controller_type = "c2.large.arm"

ssh_pubkeys = ["$PUB_KEY"]
management_cidrs = ["0.0.0.0/0"]
node_private_cidr = "10.0.0.0/8"
ssh_pubkeys = ["$PUB_KEY"]
management_cidrs = ["0.0.0.0/0"]
node_private_cidrs = ["10.0.0.0/8"]

worker_pool "pool-1" {
count = 1
12 changes: 6 additions & 6 deletions ci/packet_fluo/packet_fluo-cluster.lokocfg.envsubst
@@ -28,9 +28,9 @@ EOF

project_id = "$PACKET_PROJECT_ID"

ssh_pubkeys = ["$PUB_KEY"]
management_cidrs = ["0.0.0.0/0"]
node_private_cidr = "10.0.0.0/8"
ssh_pubkeys = ["$PUB_KEY"]
management_cidrs = ["0.0.0.0/0"]
node_private_cidrs = ["10.0.0.0/8"]

worker_pool "general" {
count = 1
@@ -44,7 +44,7 @@
}

worker_pool "storage" {
count = 3
count = 3
node_type = "c2.medium.x86"

labels = {
@@ -77,7 +77,7 @@ component "rook" {
}

component "rook-ceph" {
monitor_count = 3
monitor_count = 3
enable_toolbox = true

node_affinity {
@@ -93,7 +93,7 @@ component "rook-ceph" {
}

storage_class {
enable = true
enable = true
default = true
}
}
13 changes: 11 additions & 2 deletions cli/cmd/cluster/cluster.go
@@ -18,6 +18,7 @@ import (
"fmt"
"path/filepath"

"github.com/hashicorp/hcl/v2"
"github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
"helm.sh/helm/v3/pkg/action"
@@ -58,11 +59,19 @@ func (cc clusterConfig) initialize(contextLogger *log.Entry) (*cluster, error) {
}

p, diags := getConfiguredPlatform(lokoConfig, true)
if diags.HasErrors() {
for _, diagnostic := range diags {
for _, diagnostic := range diags {
if diagnostic.Severity == hcl.DiagWarning {
contextLogger.Warn(diagnostic.Error())

continue
}

if diagnostic.Severity == hcl.DiagError {
contextLogger.Error(diagnostic.Error())
}
}

if diags.HasErrors() {
return nil, fmt.Errorf("loading platform configuration")
}
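The hunk above changes behaviour slightly: warning-severity diagnostics are now surfaced via the logger, and the command aborts only when an error-severity diagnostic is present. A self-contained sketch of the same flow; the logger field and the example diagnostic below are stand-ins, not the actual lokoctl wiring:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	log "github.com/sirupsen/logrus"
)

// reportDiagnostics mirrors the loop above: log warnings, log errors, and
// fail only if at least one diagnostic is an error.
func reportDiagnostics(contextLogger *log.Entry, diags hcl.Diagnostics) error {
	for _, diagnostic := range diags {
		switch diagnostic.Severity {
		case hcl.DiagWarning:
			contextLogger.Warn(diagnostic.Error())
		case hcl.DiagError:
			contextLogger.Error(diagnostic.Error())
		}
	}

	if diags.HasErrors() {
		return fmt.Errorf("loading platform configuration")
	}

	return nil
}

func main() {
	// A warning-only set of diagnostics: logged, but not fatal.
	diags := hcl.Diagnostics{
		{Severity: hcl.DiagWarning, Summary: "example deprecation warning"},
	}

	if err := reportDiagnostics(log.WithField("phase", "initialize"), diags); err != nil {
		log.Fatal(err)
	}
}
```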

42 changes: 21 additions & 21 deletions cli/cmd/cluster/utils_internal_test.go
@@ -172,13 +172,13 @@ func TestGetKubeconfigSourceFlag(t *testing.T) {
configFile: `cluster "packet" {
asset_dir = "/bad"
cluster_name = ""
controller_count = 0
facility = ""
management_cidrs = []
node_private_cidr = ""
project_id = ""
ssh_pubkeys = []
cluster_name = ""
controller_count = 0
facility = ""
management_cidrs = []
node_private_cidrs = ["10.10.10.10"]
project_id = ""
ssh_pubkeys = []
dns {
provider = ""
zone = ""
@@ -214,13 +214,13 @@ func TestGetKubeconfigSourceConfigFile(t *testing.T) {
configFile: `cluster "packet" {
asset_dir = "/foo"
cluster_name = ""
controller_count = 0
facility = ""
management_cidrs = []
node_private_cidr = ""
project_id = ""
ssh_pubkeys = []
cluster_name = ""
controller_count = 0
facility = ""
management_cidrs = []
node_private_cidrs = ["10.10.10.10"]
project_id = ""
ssh_pubkeys = []
dns {
provider = ""
zone = ""
@@ -277,13 +277,13 @@ func TestGetKubeconfigFromAssetsDir(t *testing.T) {
configFile: fmt.Sprintf(`cluster "packet" {
asset_dir = "%s"
cluster_name = ""
controller_count = 0
facility = ""
management_cidrs = []
node_private_cidr = ""
project_id = ""
ssh_pubkeys = []
cluster_name = ""
controller_count = 0
facility = ""
management_cidrs = []
node_private_cidrs = ["10.10.10.10"]
project_id = ""
ssh_pubkeys = []
dns {
provider = ""
zone = ""
2 changes: 0 additions & 2 deletions docs/configuration-reference/platforms/baremetal.md
@@ -36,8 +36,6 @@ variable "controller_names" {}
variable "worker_domains" {}
variable "worker_macs" {}
variable "worker_names" {}
variable "management_cidrs" {}
variable "node_private_cidr" {}
variable "state_s3_bucket" {}
variable "lock_dynamodb_table" {}
variable "oidc_issuer_url" {}
11 changes: 8 additions & 3 deletions docs/configuration-reference/platforms/packet.md
@@ -35,14 +35,15 @@ variable "route53_zone_id" {}
variable "packet_project_id" {}
variable "ssh_public_keys" {}
variable "management_cidrs" {}
variable "node_private_cidr" {}
variable "node_private_cidrs" {}
variable "state_s3_bucket" {}
variable "lock_dynamodb_table" {}
variable "oidc_issuer_url" {}
variable "oidc_client_id" {}
variable "oidc_username_claim" {}
variable "oidc_groups_claim" {}
variable "worker_clc_snippets" {}
variable "worker_pool_facility" {}
backend "s3" {
bucket = var.state_s3_bucket
@@ -89,7 +90,7 @@ cluster "packet" {
management_cidrs = var.management_cidrs
node_private_cidr = var.node_private_cidr
node_private_cidrs = var.node_private_cidrs
cluster_domain_suffix = "cluster.local"
@@ -144,6 +145,8 @@ cluster "packet" {
disable_bgp = false
facility = var.worker_pool_facility
node_type = var.workers_type
os_channel = "stable"
@@ -219,7 +222,8 @@ node_type = var.custom_default_worker_type
| `os_version` | Flatcar Container Linux version to install. Version such as "2303.3.1" or "current". | "current" | string | false |
| `ipxe_script_url` | Boot via iPXE. Required for arm64. | - | string | false |
| `management_cidrs` | List of IPv4 CIDRs authorized to access or manage the cluster. Example ["0.0.0.0/0"] to allow all. | - | list(string) | true |
| `node_private_cidr` | Private IPv4 CIDR of the nodes used to allow inter-node traffic. Example "10.0.0.0/8" | - | string | true |
| `node_private_cidr` | (Deprecated, use `node_private_cidrs` instead) Private IPv4 CIDR of the nodes used to allow inter-node traffic. Example "10.0.0.0/8". | - | string | true |
| `node_private_cidrs` | List of Private IPv4 CIDRs of the nodes used to allow inter-node traffic. Example ["10.0.0.0/8"]. | - | list(string) | true |
| `enable_aggregation` | Enable the Kubernetes Aggregation Layer. | true | bool | false |
| `enable_tls_bootstrap` | Enable TLS bootstraping for Kubelet. | true | bool | false |
| `encrypt_pod_traffic` | Enable in-cluster pod traffic encryption. If true `network_mtu` is reduced by 60 to make room for the encryption header. | false | bool | false |
@@ -243,6 +247,7 @@ node_type = var.custom_default_worker_type
| `worker_pool.os_channel` | Flatcar Container Linux channel to install from (stable, beta, alpha, edge). | "stable" | string | false |
| `worker_pool.os_version` | Flatcar Container Linux version to install. Version such as "2303.3.1" or "current". | "current" | string | false |
| `worker_pool.node_type` | Packet instance type for worker nodes. | "c3.small.x86" | string | false |
| `worker_pool.facility` | Packet facility to use for deploying the worker pool. Enable ["Backend Transfer"](https://metal.equinix.com/developers/docs/networking/features/#backend-transfer) on the Equinix Metal project for this to work. | Same as controller nodes. | string | false |
| `worker_pool.labels` | Map of extra Kubernetes Node labels for worker nodes. | - | map(string) | false |
| `worker_pool.taints` | Map of Taints to assign to worker nodes. | - | map(string) | false |
| `worker_pool.reservation_ids` | Block with Packet hardware reservation IDs for worker nodes. Each key must have the format `worker-${index}` and the value is the reservation UUID. Can't be combined with `reservation_ids_default`. Key indexes must be sequential and start from 0. Example: `reservation_ids = { worker-0 = "<reservation_id>" }`. | - | map(string) | false |
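For existing cluster configurations the migration documented above is mechanical: the deprecated singular field maps onto a one-element list, and the per-pool `facility` override is opt-in. An illustrative before/after, using the CIDR value from the examples in this commit:

```hcl
# Before (deprecated):
node_private_cidr  = "10.0.0.0/8"

# After:
node_private_cidrs = ["10.0.0.0/8"]
```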
34 changes: 17 additions & 17 deletions docs/quickstarts/packet.md
@@ -94,27 +94,27 @@ Create a file named `cluster.lokocfg` with the following contents:

```hcl
cluster "packet" {
asset_dir = "./assets"
cluster_name = "lokomotive-demo"
asset_dir = "./assets"
cluster_name = "lokomotive-demo"
dns {
zone = "example.com"
provider = "route53"
}
facility = "ams1"
facility = "ams1"
project_id = "89273817-4f44-4b41-9f0c-cb00bf538542"
controller_type = "c3.small.x86"
ssh_pubkeys = ["ssh-rsa AAAA..."]
management_cidrs = ["0.0.0.0/0"]
node_private_cidr = "10.0.0.0/8"
ssh_pubkeys = ["ssh-rsa AAAA..."]
management_cidrs = ["0.0.0.0/0"]
node_private_cidrs = ["10.0.0.0/8"]
controller_count = 1
worker_pool "pool-1" {
count = 2
count = 2
node_type = "c3.small.x86"
}
}
@@ -173,11 +173,11 @@ Your configurations are stored in ./assets
Now checking health and readiness of the cluster nodes ...
Node Ready Reason Message
lokomotive-demo-controller-0 True KubeletReady kubelet is posting ready status
lokomotive-demo-pool-1-worker-0 True KubeletReady kubelet is posting ready status
lokomotive-demo-pool-1-worker-1 True KubeletReady kubelet is posting ready status
Node Ready Reason Message
lokomotive-demo-controller-0 True KubeletReady kubelet is posting ready status
lokomotive-demo-pool-1-worker-0 True KubeletReady kubelet is posting ready status
lokomotive-demo-pool-1-worker-1 True KubeletReady kubelet is posting ready status
Success - cluster is healthy and nodes are ready!
```
@@ -219,13 +219,13 @@ Sample output:

```
{
"args": {},
"args": {},
"headers": {
"Accept": "*/*",
"Host": "localhost:8080",
"Accept": "*/*",
"Host": "localhost:8080",
"User-Agent": "curl/7.70.0"
},
"origin": "127.0.0.1",
},
"origin": "127.0.0.1",
"url": "http://localhost:8080/get"
}
```
8 changes: 4 additions & 4 deletions examples/packet-production/cluster.lokocfg
@@ -3,7 +3,7 @@ variable "route53_zone_id" {}
variable "packet_project_id" {}
variable "ssh_public_keys" {}
variable "management_cidrs" {}
variable "node_private_cidr" {}
variable "node_private_cidrs" {}
variable "cert_manager_email" {}
variable "state_s3_bucket" {}
variable "lock_dynamodb_table" {}
@@ -76,9 +76,9 @@ cluster "packet" {

project_id = var.packet_project_id

ssh_pubkeys = var.ssh_public_keys
management_cidrs = var.management_cidrs
node_private_cidr = var.node_private_cidr
ssh_pubkeys = var.ssh_public_keys
management_cidrs = var.management_cidrs
node_private_cidrs = var.node_private_cidrs

worker_pool "pool-1" {
count = var.workers_count
10 changes: 5 additions & 5 deletions examples/packet-testing/cluster.lokocfg
@@ -33,8 +33,8 @@ variable "management_cidrs" {
default = ["0.0.0.0/0"]
}

variable "node_private_cidr" {
default = "10.0.0.0/8"
variable "node_private_cidrs" {
default = ["10.0.0.0/8"]
}

cluster "packet" {
@@ -54,9 +54,9 @@ cluster "packet" {

project_id = var.packet_project_id

ssh_pubkeys = var.ssh_public_keys
management_cidrs = var.management_cidrs
node_private_cidr = var.node_private_cidr
ssh_pubkeys = var.ssh_public_keys
management_cidrs = var.management_cidrs
node_private_cidrs = var.node_private_cidrs

worker_pool "pool-1" {
count = var.workers_count
2 changes: 1 addition & 1 deletion go.mod
@@ -1,6 +1,6 @@
module github.com/kinvolk/lokomotive

go 1.12
go 1.15

require (
github.com/MakeNowJust/heredoc v1.0.0 // indirect