This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

k8s: Update to 1.18 #217

Merged 6 commits on Mar 27, 2020
CHANGELOG.md (2 changes: 1 addition & 1 deletion)
@@ -2,7 +2,7 @@

Initial release.

-* Kubernetes v1.17.4
+* Kubernetes v1.18.0
* Running on [Flatcar Container Linux](https://www.flatcar-linux.org/)
* Fully self-hosted, including the kubelet
* Single or multi-master
@@ -122,7 +122,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/master,node.kubernetes.io/controller=true"
NODE_TAINTS="node-role.kubernetes.io/master=:NoSchedule"
@@ -144,7 +144,7 @@ storage:
# Wrapper for bootkube start
set -e
# Pre-pull hyperkube image because when it is later pulled but takes too long it times out
-docker pull k8s.gcr.io/hyperkube:v1.17.4
+docker pull k8s.gcr.io/hyperkube:v1.18.0
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
@@ -100,7 +100,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node"
- path: /etc/sysctl.d/max-user-watches.conf
@@ -133,7 +133,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.17.4 \
+docker://k8s.gcr.io/hyperkube:v1.18.0 \
--net=host \
--dns=host \
-- \
@@ -124,7 +124,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/master,node.kubernetes.io/controller=true"
NODE_TAINTS="node-role.kubernetes.io/master=:NoSchedule"
@@ -98,7 +98,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node"
- path: /etc/sysctl.d/max-user-watches.conf
@@ -118,7 +118,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.17.4 \
+docker://k8s.gcr.io/hyperkube:v1.18.0 \
--net=host \
--dns=host \
-- \
@@ -129,7 +129,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/master,node.kubernetes.io/controller=true"
NODE_TAINTS="node-role.kubernetes.io/master=:NoSchedule"
@@ -157,7 +157,7 @@ storage:
# Wrapper for bootkube start
set -e
# Pre-pull hyperkube image because when it is later pulled but takes too long it times out
-docker pull k8s.gcr.io/hyperkube:v1.17.4
+docker pull k8s.gcr.io/hyperkube:v1.18.0
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
@@ -89,7 +89,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node"
- path: /etc/hostname
assets/lokomotive-kubernetes/bootkube/assets.tf (46 changes: 26 additions & 20 deletions)
@@ -4,19 +4,22 @@ resource "template_dir" "bootstrap-manifests" {
destination_dir = "${var.asset_dir}/bootstrap-manifests"

vars = {
-hyperkube_image = var.container_images["hyperkube"]
-etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers))
-cloud_provider = var.cloud_provider
-pod_cidr = var.pod_cidr
-service_cidr = var.service_cidr
-trusted_certs_dir = var.trusted_certs_dir
+hyperkube_image = var.container_images["hyperkube"]
+kube_apiserver_image = var.container_images["kube_apiserver"]
+kube_controller_manager_image = var.container_images["kube_controller_manager"]
+kube_scheduler_image = var.container_images["kube_scheduler"]
+etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers))
+cloud_provider = var.cloud_provider
+pod_cidr = var.pod_cidr
+service_cidr = var.service_cidr
+trusted_certs_dir = var.trusted_certs_dir
}
}

resource "local_file" "kube-apiserver" {
filename = "${var.asset_dir}/charts/kube-system/kube-apiserver.yaml"
content = templatefile("${path.module}/resources/charts/kube-apiserver.yaml", {
-hyperkube_image = var.container_images["hyperkube"]
+kube_apiserver_image = var.container_images["kube_apiserver"]
pod_checkpointer_image = var.container_images["pod_checkpointer"]
etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers))
cloud_provider = var.cloud_provider
@@ -74,19 +77,22 @@ data "template_file" "kubernetes" {
template = "${file("${path.module}/resources/charts/kubernetes.yaml")}"

vars = {
-hyperkube_image = var.container_images["hyperkube"]
-coredns_image = "${var.container_images["coredns"]}${var.container_arch}"
-control_plane_replicas = max(2, length(var.etcd_servers))
-cloud_provider = var.cloud_provider
-pod_cidr = var.pod_cidr
-service_cidr = var.service_cidr
-cluster_domain_suffix = var.cluster_domain_suffix
-cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
-trusted_certs_dir = var.trusted_certs_dir
-ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem)
-ca_key = base64encode(tls_private_key.kube-ca.private_key_pem)
-server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port)
-serviceaccount_key = base64encode(tls_private_key.service-account.private_key_pem)
+hyperkube_image = var.container_images["hyperkube"]
+kube_controller_manager_image = var.container_images["kube_controller_manager"]
+kube_scheduler_image = var.container_images["kube_scheduler"]
+kube_proxy_image = var.container_images["kube_proxy"]
+coredns_image = "${var.container_images["coredns"]}${var.container_arch}"
+control_plane_replicas = max(2, length(var.etcd_servers))
+cloud_provider = var.cloud_provider
+pod_cidr = var.pod_cidr
+service_cidr = var.service_cidr
+cluster_domain_suffix = var.cluster_domain_suffix
+cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
+trusted_certs_dir = var.trusted_certs_dir
+ca_cert = base64encode(tls_self_signed_cert.kube-ca.cert_pem)
+ca_key = base64encode(tls_private_key.kube-ca.private_key_pem)
+server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port)
+serviceaccount_key = base64encode(tls_private_key.service-account.private_key_pem)
}
}
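
A note on the size of the hunks above: the unchanged assignments (etcd_servers, cloud_provider, pod_cidr, and so on) appear on both the removed and the added side only because the vars blocks are re-aligned around the new, longer *_image keys; the functional change is the added per-component image entries. A possible local check that the module still passes canonical formatting after such re-alignment (assuming Terraform is installed) is:

    terraform fmt -check -diff assets/lokomotive-kubernetes/bootkube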

@@ -9,9 +9,8 @@ spec:
hostNetwork: true
containers:
- name: kube-apiserver
-image: ${hyperkube_image}
+image: ${kube_apiserver_image}
command:
-- /hyperkube
- kube-apiserver
- --advertise-address=$(POD_IP)
- --allow-privileged=true
@@ -8,9 +8,8 @@ metadata:
spec:
containers:
- name: kube-controller-manager
-image: ${hyperkube_image}
+image: ${kube_controller_manager_image}
command:
-- ./hyperkube
- kube-controller-manager
- --allocate-node-cidrs=true
- --cluster-cidr=${pod_cidr}
@@ -8,9 +8,8 @@ metadata:
spec:
containers:
- name: kube-scheduler
-image: ${hyperkube_image}
+image: ${kube_scheduler_image}
command:
-- ./hyperkube
- kube-scheduler
- --kubeconfig=/etc/kubernetes/secrets/kubeconfig
- --leader-elect=true
@@ -9,7 +9,7 @@ apiserver:
aggregationCaCert: ${aggregation_ca_cert}
aggregationClientCert: ${aggregation_client_cert}
aggregationClientKey: ${aggregation_client_key}
-image: ${hyperkube_image}
+image: ${kube_apiserver_image}
cloudProvider: ${cloud_provider}
etcdServers: ${etcd_servers}
enableAggregation: ${enable_aggregation}
@@ -76,8 +76,7 @@ spec:
- -c
- |
set -xe
-exec /hyperkube \
-kube-apiserver \
+exec /usr/local/bin/kube-apiserver \
--advertise-address=$(POD_IP) \
--allow-privileged=true \
--anonymous-auth=false \
@@ -109,7 +108,6 @@ spec:
{{ end -}}
--storage-backend=etcd3
{{- else }}
-- /hyperkube
- kube-apiserver
- --advertise-address=$(POD_IP)
- --allow-privileged=true
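
With the switch away from the hyperkube multiplexer, the template now execs the kube-apiserver binary directly from the component image. A minimal local sanity check, assuming Docker is available (the binary path is the same one the template execs):

    docker run --rm k8s.gcr.io/kube-apiserver:v1.18.0 /usr/local/bin/kube-apiserver --version
    # should print something like: Kubernetes v1.18.0
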
@@ -9,7 +9,7 @@ apiserver:
aggregationCaCert:
aggregationClientCert:
aggregationClientKey:
-image: k8s.gcr.io/hyperkube:v1.17.4
+image: k8s.gcr.io/kube-apiserver:v1.18.0
cloudProvider:
etcdServers:
aggregationFlags:
@@ -1,3 +1,3 @@
-image: k8s.gcr.io/hyperkube:v1.17.4
+image: k8s.gcr.io/hyperkube:v1.18.0
clusterDNS: 10.0.0.10
clusterDomain: cluster.local
@@ -2,18 +2,18 @@ controllerManager:
serviceAccountKey: ${serviceaccount_key}
caCert: ${ca_cert}
caKey: ${ca_key}
-image: ${hyperkube_image}
+image: ${kube_controller_manager_image}
cloudProvider: ${cloud_provider}
serviceCIDR: ${service_cidr}
podCIDR: ${pod_cidr}
controlPlaneReplicas: ${control_plane_replicas}
trustedCertsDir: ${trusted_certs_dir}
kubeProxy:
-image: ${hyperkube_image}
+image: ${kube_proxy_image}
podCIDR: ${pod_cidr}
trustedCertsDir: ${trusted_certs_dir}
kubeScheduler:
-image: ${hyperkube_image}
+image: ${kube_scheduler_image}
controlPlaneReplicas: ${control_plane_replicas}
kubeConfigInCluster:
server: ${server}
@@ -51,7 +51,6 @@ spec:
- name: kube-controller-manager
image: {{ .Values.controllerManager.image }}
command:
-- ./hyperkube
- kube-controller-manager
- --use-service-account-credentials
- --allocate-node-cidrs=true
@@ -35,7 +35,6 @@ spec:
- name: kube-proxy
image: {{ .Values.kubeProxy.image }}
command:
-- ./hyperkube
- kube-proxy
- --cluster-cidr={{ .Values.kubeProxy.podCIDR }}
- --hostname-override=$(NODE_NAME)
@@ -51,7 +51,6 @@ spec:
- name: kube-scheduler
image: "{{ .Values.kubeScheduler.image }}"
command:
-- ./hyperkube
- kube-scheduler
- --leader-elect=true
livenessProbe:
@@ -2,18 +2,18 @@ controllerManager:
serviceAccountKey:
caCert:
caKey:
-image: k8s.gcr.io/hyperkube:v1.17.4
+image: k8s.gcr.io/kube-controller-manager:v1.18.0
cloudProvider:
serviceCIDR: 10.0.0.0/24
podCIDR: 10.2.0.0/16
controlPlaneReplicas: 1
trustedCertsDir: /usr/share/ca-certificates
kubeProxy:
-image: k8s.gcr.io/hyperkube:v1.17.4
+image: k8s.gcr.io/kube-proxy:v1.18.0
podCIDR: 10.2.0.0/16
trustedCertsDir: /usr/share/ca-certificates
kubeScheduler:
-image: k8s.gcr.io/hyperkube:v1.17.4
+image: k8s.gcr.io/kube-scheduler:v1.18.0
controlPlaneReplicas: 1
kubeConfigInCluster:
server:
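
Since the control-plane images are now pinned per component rather than through a single hyperkube tag, a quick post-upgrade check (illustrative only, not part of this PR) is to list what the kube-system pods are actually running; each control-plane pod should report its kube-* image at v1.18.0:

    kubectl -n kube-system get pods \
      -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
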
assets/lokomotive-kubernetes/bootkube/variables.tf (14 changes: 9 additions & 5 deletions)
@@ -89,11 +89,15 @@ variable "container_images" {
type = map(string)

default = {
-calico = "calico/node:v3.13.1"
-calico_cni = "calico/cni:v3.13.1"
-hyperkube = "k8s.gcr.io/hyperkube:v1.17.4"
-coredns = "coredns/coredns:coredns-"
-pod_checkpointer = "kinvolk/pod-checkpointer:83e25e5968391b9eb342042c435d1b3eeddb2be1"
+calico = "calico/node:v3.13.1"
+calico_cni = "calico/cni:v3.13.1"
+hyperkube = "k8s.gcr.io/hyperkube:v1.18.0"
+coredns = "coredns/coredns:coredns-"
+pod_checkpointer = "kinvolk/pod-checkpointer:83e25e5968391b9eb342042c435d1b3eeddb2be1"
+kube_apiserver = "k8s.gcr.io/kube-apiserver:v1.18.0"
+kube_controller_manager = "k8s.gcr.io/kube-controller-manager:v1.18.0"
+kube_scheduler = "k8s.gcr.io/kube-scheduler:v1.18.0"
+kube_proxy = "k8s.gcr.io/kube-proxy:v1.17.4"
}
}
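
Because Terraform replaces the whole container_images map when a caller sets it (defaults are not merged key by key), keeping all component tags in this default in lockstep matters. An assumed quick consistency check from the repository root:

    grep -E 'k8s\.gcr\.io/[a-z-]+:v1\.' assets/lokomotive-kubernetes/bootkube/variables.tf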

@@ -125,7 +125,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/master,node.kubernetes.io/controller=true"
NODE_TAINTS="node-role.kubernetes.io/master=:NoSchedule"
@@ -94,7 +94,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node"
- path: /etc/sysctl.d/max-user-watches.conf
@@ -114,7 +114,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.17.4 \
+docker://k8s.gcr.io/hyperkube:v1.18.0 \
--net=host \
--dns=host \
-- \
@@ -127,7 +127,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/master,node.kubernetes.io/controller=true"
NODE_TAINTS="node-role.kubernetes.io/master=:NoSchedule"
@@ -155,7 +155,7 @@ storage:
# Wrapper for bootkube start
set -e
# Pre-pull hyperkube image because when it is later pulled but takes too long it times out
-docker pull k8s.gcr.io/hyperkube:v1.17.4
+docker pull k8s.gcr.io/hyperkube:v1.18.0
# Move experimental manifests
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
@@ -101,7 +101,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
-KUBELET_IMAGE_TAG=v1.17.4
+KUBELET_IMAGE_TAG=v1.18.0
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node"
- path: /etc/sysctl.d/max-user-watches.conf
@@ -140,7 +140,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
-docker://k8s.gcr.io/hyperkube:v1.17.4 \
+docker://k8s.gcr.io/hyperkube:v1.18.0 \
--net=host \
--dns=host \
-- \