Skip to content

Commit

Permalink
Upgrade k8s 1.23 (#324)
Browse files Browse the repository at this point in the history
  • Loading branch information
errm authored Aug 18, 2022
1 parent 0b1692e commit b0b66d0
Show file tree
Hide file tree
Showing 18 changed files with 45 additions and 59 deletions.
10 changes: 5 additions & 5 deletions .github/actions/terratest/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
FROM golang:1.17.3-alpine3.13
FROM golang:1.19-alpine3.16

WORKDIR /

ARG TERRAFORM_VERSION=1.2.3
ARG KUBECTL_VERSION=1.22.11

ARG TERRAFORM_VERSION=1.2.7
ARG KUBECTL_VERSION=1.23.7

RUN apk add --no-cache \
bash \
Expand All @@ -14,7 +13,8 @@ RUN apk add --no-cache \
git \
jq \
perl-utils \
aws-cli && \
py3-pip && \
pip install awscli && \
git clone https://github.com/tfutils/tfenv.git ~/.tfenv && \
echo 'export PATH="$HOME/.tfenv/bin:$PATH"' >> ~/.bash_profile && ln -s ~/.tfenv/bin/* /usr/local/bin && \
tfenv install $TERRAFORM_VERSION && \
Expand Down
2 changes: 1 addition & 1 deletion .terraform-version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1.2.3
1.2.7
2 changes: 0 additions & 2 deletions examples/cluster/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,6 @@ module "cluster" {
vpc_config = data.terraform_remote_state.environment.outputs.vpc_config
iam_config = data.terraform_remote_state.environment.outputs.iam_config

aws_ebs_csi_driver = var.aws_ebs_csi_driver

critical_addons_node_group_key_name = "development"

endpoint_public_access = true
Expand Down
5 changes: 0 additions & 5 deletions examples/cluster/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,3 @@ variable "cluster_name" {
type = string
default = "test-cluster"
}

variable "aws_ebs_csi_driver" {
type = bool
default = true
}
2 changes: 1 addition & 1 deletion hack/generate_addons.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,4 @@ helm_template() {
helm template --no-hooks --namespace=kube-system --version $3 -f $ADDONS_DIR/helm/$2.yaml $2 $1/$2${4:-} | grep -v Helm > $ADDONS_DIR/$2.yaml
}

helm_template autoscaler cluster-autoscaler 9.18.1
helm_template autoscaler cluster-autoscaler 9.19.3
6 changes: 1 addition & 5 deletions modules/asg_node_group/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -214,10 +214,6 @@ module "bottlerocket_nodes" {
bottlerocket = true
}
```
⚠️ If you are using bottlerocket nodes and need EBS persistent volumes you must
enable the [AWS EBS CSI driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) by setting `aws_ebs_csi_driver = true` on the cluster module.
see: https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#csi-plugin

⚠️ Bottlerocket now [supports GPU nodes](https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#aws-k8s--nvidia-variants), set `gpu = true` to enable them. Ensure that you set `instance_types` to a GPU instance type.

📝 If you want to get a shell session on your instances via Bottlerocket's SSM agent
Expand All @@ -230,4 +226,4 @@ provision your node role, then this is done by default!
By default, IMDSv2 will be enabled through the variable nodes_metadata_http_tokens.

⚠️ If you are using kube2iam change the default value to "optional". [terraform IMDSv2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template#metadata-options)
Once we don't have any cluster using kube2iam, this variable can be removed and forced to be required the token.
Once we don't have any cluster using kube2iam, this variable can be removed and forced to be required the token.
2 changes: 1 addition & 1 deletion modules/asg_node_group/main.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
locals {
k8s_version = "1.22"
k8s_version = "1.23"
preset_instance_families = {
memory_optimized = ["r5", "r5d", "r5n", "r5dn", "r5a", "r5ad"]
general_purpose = ["m5", "m5d", "m5n", "m5dn", "m5a", "m5ad"]
Expand Down
1 change: 0 additions & 1 deletion modules/asg_node_group/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ variable "cluster_config" {
node_security_group = string
node_instance_profile = string
tags = map(string)
aws_ebs_csi_driver = bool
})
}

Expand Down
1 change: 0 additions & 1 deletion modules/cluster/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,6 @@ specify the arn of an existing key by setting `kms_cmk_arn`
| addon | variable | default | iam role variable |
|-------|----------|---------|-------------------|
| [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) | `cluster_autoscaler` | ✅ enabled | `cluster_autoscaler_iam_role_arn` |
| [Amazon Elastic Block Store (EBS) CSI driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/) | `aws_ebs_csi_driver` | ❌ disabled | `aws_ebs_csi_driver_iam_role_arn` |

Note that setting these variables to false will not remove provisioned add-ons from an existing cluster.

Expand Down
23 changes: 18 additions & 5 deletions modules/cluster/addons.tf
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@ module "critical_addons_node_group" {
taints = {
"CriticalAddonsOnly" = "true:NoSchedule"
}

depends_on = [
module.aws_auth
]
}

data "aws_region" "current" {}
Expand All @@ -29,31 +33,37 @@ data "aws_region" "current" {}
resource "aws_eks_addon" "vpc-cni" {
cluster_name = local.config.name
addon_name = "vpc-cni"
addon_version = "v1.11.0-eksbuild.1"
addon_version = "v1.11.2-eksbuild.1"
resolve_conflicts = "OVERWRITE"
}

resource "aws_eks_addon" "kube-proxy" {
cluster_name = local.config.name
addon_name = "kube-proxy"
addon_version = "v1.22.6-eksbuild.1"
addon_version = "v1.23.7-eksbuild.1"
resolve_conflicts = "OVERWRITE"
}

resource "aws_eks_addon" "coredns" {
cluster_name = local.config.name
addon_name = "coredns"
addon_version = "v1.8.7-eksbuild.1"
addon_version = "v1.8.7-eksbuild.2"
resolve_conflicts = "OVERWRITE"
depends_on = [
module.critical_addons_node_group
]
}

resource "aws_eks_addon" "ebs-csi" {
count = var.aws_ebs_csi_driver ? 1 : 0
count = 1
cluster_name = local.config.name
addon_name = "aws-ebs-csi-driver"
addon_version = "v1.6.1-eksbuild.1"
addon_version = "v1.10.0-eksbuild.1"
service_account_role_arn = local.aws_ebs_csi_driver_iam_role_arn
resolve_conflicts = "OVERWRITE"
depends_on = [
module.critical_addons_node_group
]
}

module "cluster_autoscaler" {
Expand All @@ -68,4 +78,7 @@ module "cluster_autoscaler" {
aws_region = data.aws_region.current.name
}
)
depends_on = [
module.critical_addons_node_group
]
}
20 changes: 10 additions & 10 deletions modules/cluster/addons/cluster-autoscaler.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
spec:
# Prevents running in privileged mode
privileged: false
Expand Down Expand Up @@ -50,7 +50,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
namespace: kube-system
spec:
Expand All @@ -68,7 +68,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
namespace: kube-system
annotations:
Expand All @@ -83,7 +83,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
rules:
- apiGroups:
Expand Down Expand Up @@ -232,7 +232,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
Expand All @@ -250,7 +250,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
namespace: kube-system
rules:
Expand Down Expand Up @@ -278,7 +278,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
namespace: kube-system
roleRef:
Expand All @@ -297,7 +297,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
namespace: kube-system
spec:
Expand All @@ -320,7 +320,7 @@ metadata:
labels:
app.kubernetes.io/instance: "cluster-autoscaler"
app.kubernetes.io/name: "aws-cluster-autoscaler"
helm.sh/chart: "cluster-autoscaler-9.18.1"
helm.sh/chart: "cluster-autoscaler-9.19.3"
name: cluster-autoscaler
namespace: kube-system
spec:
Expand All @@ -339,7 +339,7 @@ spec:
dnsPolicy: "ClusterFirst"
containers:
- name: aws-cluster-autoscaler
image: "us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.22.2"
image: "k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.1"
imagePullPolicy: "IfNotPresent"
command:
- ./cluster-autoscaler
Expand Down
4 changes: 2 additions & 2 deletions modules/cluster/addons/helm/cluster-autoscaler.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ extraArgs:
expander: least-waste
balance-similar-node-groups: true
image:
repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
tag: v1.22.2
repository: k8s.gcr.io/autoscaling/cluster-autoscaler
tag: v1.23.1
fullnameOverride: cluster-autoscaler
nameOverride: aws-cluster-autoscaler
resources:
Expand Down
2 changes: 1 addition & 1 deletion modules/cluster/aws_ebs_csi_driver_iam.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
locals {
aws_ebs_csi_driver_iam_role_count = length(var.aws_ebs_csi_driver_iam_role_arn) == 0 && var.aws_ebs_csi_driver ? 1 : 0
aws_ebs_csi_driver_iam_role_count = length(var.aws_ebs_csi_driver_iam_role_arn) == 0 ? 1 : 0
aws_ebs_csi_driver_iam_role_arn = length(var.aws_ebs_csi_driver_iam_role_arn) > 0 ? var.aws_ebs_csi_driver_iam_role_arn : join("", aws_iam_role.aws_ebs_csi_driver.*.arn)
}

Expand Down
3 changes: 2 additions & 1 deletion modules/cluster/kubectl/kubeconfig.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,9 @@ users:
- name: ${cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
interactiveMode: IfAvailable
args:
- "eks"
- "get-token"
Expand Down
6 changes: 3 additions & 3 deletions modules/cluster/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ data "aws_iam_role" "service_role" {
name = var.iam_config.service_role
}
locals {
k8s_version = "1.22"
k8s_version = "1.23"
}

resource "aws_eks_cluster" "control_plane" {
Expand Down Expand Up @@ -96,8 +96,8 @@ module "storage_classes" {
manifest = templatefile(
"${path.module}/storage_classes.yaml.tmpl",
{
provisioner = var.aws_ebs_csi_driver ? "ebs.csi.aws.com" : "kubernetes.io/aws-ebs",
fstype = var.aws_ebs_csi_driver ? "csi.storage.k8s.io/fstype: ${var.pv_fstype}" : "fsType: ${var.pv_fstype}"
provisioner = "ebs.csi.aws.com",
fstype = "csi.storage.k8s.io/fstype: ${var.pv_fstype}",
}
)
}
Expand Down
1 change: 0 additions & 1 deletion modules/cluster/outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ locals {
node_security_group = aws_eks_cluster.control_plane.vpc_config.0.cluster_security_group_id
node_instance_profile = var.iam_config.node_role
tags = var.tags
aws_ebs_csi_driver = var.aws_ebs_csi_driver
}
}

Expand Down
6 changes: 0 additions & 6 deletions modules/cluster/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -68,12 +68,6 @@ variable "cluster_autoscaler_iam_role_arn" {
description = "The IAM role for the cluster_autoscaler, if omitted then an IAM role will be created"
}

variable "aws_ebs_csi_driver" {
type = bool
default = false
description = "Should the Amazon Elastic Block Store (EBS) CSI driver be deployed"
}

variable "aws_ebs_csi_driver_iam_role_arn" {
type = string
default = ""
Expand Down
8 changes: 0 additions & 8 deletions test/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ func TestTerraformAwsEksCluster(t *testing.T) {
deployTerraform(t, environmentDir, map[string]interface{}{})
deployTerraform(t, workingDir, map[string]interface{}{
"cluster_name": clusterName,
"aws_ebs_csi_driver": false,
})
})

Expand Down Expand Up @@ -82,20 +81,13 @@ func TestTerraformAwsEksCluster(t *testing.T) {
validateClusterAutoscaler(t, kubeconfig)
validateKubeBench(t, kubeconfig)
validateStorage(t, kubeconfig)
overideAndApplyTerraform(t, workingDir, map[string]interface{}{
"aws_ebs_csi_driver": true,
})
validateStorage(t, kubeconfig)
})

test_structure.RunTestStage(t, "validate_bottlerocket_node_group", func() {
terraformOptions := test_structure.LoadTerraformOptions(t, workingDir)
kubeconfig := writeKubeconfig(t, terraform.Output(t, terraformOptions, "cluster_name"))
defer os.Remove(kubeconfig)
nodeGroupDir := "../examples/cluster/bottlerocket_node_group"
overideAndApplyTerraform(t, workingDir, map[string]interface{}{
"aws_ebs_csi_driver": true,
})
deployTerraform(t, nodeGroupDir, map[string]interface{}{})
defer cleanupTerraform(t, nodeGroupDir)
validateClusterAutoscaler(t, kubeconfig)
Expand Down

0 comments on commit b0b66d0

Please sign in to comment.