From b1298cff3ed616e8fcc1ee99a111a164ba4f4b8f Mon Sep 17 00:00:00 2001
From: "Lubomir I. Ivanov"
Date: Wed, 28 Nov 2018 04:09:21 +0200
Subject: [PATCH] kubeadm: add improvements to HA docs (#11094)

* kubeadm: add information and diagrams for HA topologies
* kubeadm: update HA doc with simplified steps
* kubeadm: update HA doc with simplified steps
* edit ha, add new topology topic, reorder by weight
* troubleshoot markdown
* fix more markdown, fix links
* more markdown
* more markdown
* more markdown
* changes after reviewer comments
* add steps about Weave
* update note about stacked topology
---
 .../en/docs/setup/independent/ha-topology.md  |  71 +++
 .../setup/independent/high-availability.md    | 516 ++++++------------
 .../setup/independent/kubelet-integration.md  |   2 +-
 .../independent/setup-ha-etcd-with-kubeadm.md |   2 +-
 .../independent/troubleshooting-kubeadm.md    |   2 +-
 .../kubeadm-ha-topology-external-etcd.svg     |   1 +
 .../kubeadm-ha-topology-stacked-etcd.svg      |   1 +
 7 files changed, 231 insertions(+), 364 deletions(-)
 create mode 100644 content/en/docs/setup/independent/ha-topology.md
 create mode 100755 static/images/kubeadm/kubeadm-ha-topology-external-etcd.svg
 create mode 100755 static/images/kubeadm/kubeadm-ha-topology-stacked-etcd.svg

diff --git a/content/en/docs/setup/independent/ha-topology.md b/content/en/docs/setup/independent/ha-topology.md
new file mode 100644
index 0000000000000..2cd28f32ec8e7
--- /dev/null
+++ b/content/en/docs/setup/independent/ha-topology.md
@@ -0,0 +1,71 @@
+---
+reviewers:
+- sig-cluster-lifecycle
+title: Options for Highly Available Topology
+content_template: templates/concept
+weight: 50
+---
+
+{{% capture overview %}}
+
+This page explains the two options for configuring the topology of your highly available (HA) Kubernetes clusters.
+
+You can set up an HA cluster:
+
+- With stacked control plane nodes, where etcd nodes are colocated with control plane nodes
+- With external etcd nodes, where etcd runs on separate nodes from the control plane
+
+You should carefully consider the advantages and disadvantages of each topology before setting up an HA cluster.
+
+{{% /capture %}}
+
+{{% capture body %}}
+
+## Stacked etcd topology
+
+A stacked HA cluster is a [topology](https://en.wikipedia.org/wiki/Network_topology) where the distributed
+data storage cluster provided by etcd is stacked on top of the cluster formed by the nodes managed by
+kubeadm that run control plane components.
+
+Each control plane node runs an instance of the `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager`.
+The `kube-apiserver` is exposed to worker nodes using a load balancer.
+
+Each control plane node creates a local etcd member, and this etcd member communicates only with
+the `kube-apiserver` of that node. The same applies to the local `kube-controller-manager`
+and `kube-scheduler` instances.
+
+This topology couples the control planes and etcd members on the same nodes. It is simpler to set up than a cluster
+with external etcd nodes, and simpler to manage for replication.
+
+However, a stacked cluster runs the risk of coupled failures. If one node goes down, both an etcd member and a control
+plane instance are lost, and redundancy is compromised. You can mitigate this risk by adding more control plane nodes.
+
+You should therefore run a minimum of three stacked control plane nodes for an HA cluster.
+
+This is the default topology in kubeadm. A local etcd member is created automatically
+on control plane nodes when using `kubeadm init` and `kubeadm join --experimental-control-plane`.
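+
+As an illustration only, bringing up a three-node stacked control plane might look like the following
+sketch. The configuration file contents, load balancer address, token, and CA certificate hash are
+placeholders taken from your own cluster; see the step-by-step guide linked below for the full procedure.
+
+```sh
+# On the first control plane node: kubeadm-config.yaml points
+# controlPlaneEndpoint at the load balancer, and kubeadm creates
+# the local etcd member automatically.
+sudo kubeadm init --config=kubeadm-config.yaml
+
+# On each additional control plane node, after copying the shared
+# certificates, run the join command printed by `kubeadm init`:
+sudo kubeadm join LOAD_BALANCER_DNS:LOAD_BALANCER_PORT \
+    --token <token> \
+    --discovery-token-ca-cert-hash sha256:<hash> \
+    --experimental-control-plane
+```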
+
+![Stacked etcd topology](/images/kubeadm/kubeadm-ha-topology-stacked-etcd.svg)
+
+## External etcd topology
+
+An HA cluster with external etcd is a [topology](https://en.wikipedia.org/wiki/Network_topology) where the distributed data storage cluster provided by etcd is external to the cluster formed by the nodes that run control plane components.
+
+Like the stacked etcd topology, each control plane node in an external etcd topology runs an instance of the `kube-apiserver`, `kube-scheduler`, and `kube-controller-manager`. And the `kube-apiserver` is exposed to worker nodes using a load balancer. However, etcd members run on separate hosts, and each etcd host communicates with the `kube-apiserver` of each control plane node.
+
+This topology decouples the control plane and etcd members. It therefore provides an HA setup where
+losing a control plane instance or an etcd member has less impact and does not affect
+the cluster redundancy as much as the stacked HA topology.
+
+However, this topology requires twice the number of hosts as the stacked HA topology.
+A minimum of three hosts for control plane nodes and three hosts for etcd nodes are required for an HA cluster with this topology.
+
+![External etcd topology](/images/kubeadm/kubeadm-ha-topology-external-etcd.svg)
+
+{{% /capture %}}
+
+{{% capture whatsnext %}}
+
+- [Set up a highly available cluster with kubeadm](/docs/setup/independent/high-availability/)
+
+{{% /capture %}}
\ No newline at end of file
diff --git a/content/en/docs/setup/independent/high-availability.md b/content/en/docs/setup/independent/high-availability.md
index 31cc2ed84a0e1..e0e6a66249688 100644
--- a/content/en/docs/setup/independent/high-availability.md
+++ b/content/en/docs/setup/independent/high-availability.md
@@ -3,7 +3,7 @@ reviewers:
 - sig-cluster-lifecycle
 title: Creating Highly Available Clusters with kubeadm
 content_template: templates/task
-weight: 50
+weight: 60
 ---
 
 {{% capture overview %}}
@@ -11,15 +11,23 @@ weight: 50
 This page explains two different approaches to setting up a highly available Kubernetes
 cluster using kubeadm:
 
-- With stacked masters. This approach requires less infrastructure. etcd members
+- With stacked control plane nodes. This approach requires less infrastructure. The etcd members
   and control plane nodes are co-located.
 - With an external etcd cluster. This approach requires more infrastructure. The
   control plane nodes and etcd members are separated.
 
+Before proceeding, you should carefully consider which approach best meets the needs of your applications
+and environment. [This comparison topic](/docs/setup/independent/ha-topology/) outlines the advantages and disadvantages of each.
+
 Your clusters must run Kubernetes version 1.12 or later. You should also be aware that
-setting up HA clusters with kubeadm is still experimental. You might encounter issues
-with upgrading your clusters, for example. We encourage you to try either approach,
-and provide feedback.
+setting up HA clusters with kubeadm is still experimental and will be further simplified
+in future versions. You might encounter issues with upgrading your clusters, for example.
+We encourage you to try either approach, and provide us with feedback in the kubeadm
+[issue tracker](https://github.com/kubernetes/kubeadm/issues/new).
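+
+To confirm which versions are installed on a node before you begin, you can run the following
+commands (a quick check only; the exact output format varies slightly between releases):
+
+```sh
+kubeadm version -o short
+kubelet --version
+kubectl version --client --short
+```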
+
+Note that the alpha feature gate `HighAvailability` is deprecated in v1.12 and removed in v1.13.
+
+See also [The HA upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-ha).
 
 {{< caution >}}
 **Caution**: This page does not address running your cluster on a cloud provider.
@@ -40,9 +48,10 @@ For both methods you need this infrastructure:
   requirements](/docs/setup/independent/install-kubeadm/#before-you-begin) for
   the workers
 - Full network connectivity between all machines in the cluster (public or
-  private network is fine)
-- SSH access from one device to all nodes in the system
+  private network)
 - sudo privileges on all machines
+- SSH access from one device to all nodes in the system
+- `kubeadm` and `kubelet` installed on all machines. `kubectl` is optional.
 
 For the external etcd cluster only, you also need:
 
@@ -61,43 +70,15 @@ needed.
 ## First steps for both methods
 
 {{< note >}}
-**Note**: All commands in this guide on any control plane or etcd node should be
+**Note**: All commands on any control plane or etcd node should be
 run as root.
 {{< /note >}}
 
-- Find your pod CIDR. For details, see [the CNI network
-  documentation](/docs/setup/independent/create-cluster-kubeadm/#pod-network).
-  The example uses Calico, so the pod CIDR is `192.168.0.0/16`.
-
-### Configure SSH
-
-1. Enable ssh-agent on your main device that has access to all other nodes in
-   the system:
-
-   ```
-   eval $(ssh-agent)
-   ```
-
-1. Add your SSH identity to the session:
-
-   ```
-   ssh-add ~/.ssh/path_to_private_key
-   ```
-
-1. SSH between nodes to check that the connection is working correctly.
-
-   - When you SSH to any node, make sure to add the `-A` flag:
-
-     ```
-     ssh -A 10.0.0.7
-     ```
-
-   - When using sudo on any node, make sure to preserve the environment so SSH
-     forwarding works:
-
-     ```
-     sudo -E -s
-     ```
+- Some CNI network plugins like Calico require a CIDR such as `192.168.0.0/16`, and
+  some, like Weave, do not. See [the CNI network
+  documentation](/docs/setup/independent/create-cluster-kubeadm/#pod-network).
+  To add a pod CIDR, set the `podSubnet: 192.168.0.0/16` field under
+  the `networking` object of `ClusterConfiguration`.
 
 ### Create load balancer for kube-apiserver
 
 {{< note >}}
@@ -121,6 +102,11 @@ different configuration.
     on the apiserver port. It must also allow incoming traffic on its
     listening port.
 
+  - [HAProxy](http://www.haproxy.org/) can be used as a load balancer.
+
+  - Make sure the address of the load balancer always matches
+    the address of kubeadm's `ControlPlaneEndpoint`.
+
 1. Add the first control plane nodes to the load balancer and test the
    connection:
 
@@ -135,95 +121,43 @@ different configuration.
 
 1. Add the remaining control plane nodes to the load balancer target group.
 
-## Stacked control plane nodes
+### Configure SSH
 
-### Bootstrap the first stacked control plane node
+SSH is required if you want to control all nodes from a single machine.
 
-{{< note >}}
-**Note**: Optionally replace `stable` with a different version of Kubernetes, for example `v1.12.0`.
-{{< /note >}}
+1. Enable ssh-agent on your main device that has access to all other nodes in
+   the system:
 
-1. 
Create a `kubeadm-config.yaml` template file: + ``` + eval $(ssh-agent) + ``` - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - local: - extraArgs: - listen-client-urls: "https://127.0.0.1:2379,https://CP0_IP:2379" - advertise-client-urls: "https://CP0_IP:2379" - listen-peer-urls: "https://CP0_IP:2380" - initial-advertise-peer-urls: "https://CP0_IP:2380" - initial-cluster: "CP0_HOSTNAME=https://CP0_IP:2380" - serverCertSANs: - - CP0_HOSTNAME - - CP0_IP - peerCertSANs: - - CP0_HOSTNAME - - CP0_IP - networking: - # This CIDR is a Calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate - values for your cluster: - - * `LOAD_BALANCER_DNS` - * `LOAD_BALANCER_PORT` - * `CP0_HOSTNAME` - * `CP0_IP` - -1. Run `kubeadm init --config kubeadm-config.yaml` - -### Copy required files to other control plane nodes - -The following certificates and other required files were created when you ran `kubeadm init`. -Copy these files to your other control plane nodes: - -- `/etc/kubernetes/pki/ca.crt` -- `/etc/kubernetes/pki/ca.key` -- `/etc/kubernetes/pki/sa.key` -- `/etc/kubernetes/pki/sa.pub` -- `/etc/kubernetes/pki/front-proxy-ca.crt` -- `/etc/kubernetes/pki/front-proxy-ca.key` -- `/etc/kubernetes/pki/etcd/ca.crt` -- `/etc/kubernetes/pki/etcd/ca.key` - -Copy the admin kubeconfig to the other control plane nodes: - -- `/etc/kubernetes/admin.conf` - -In the following example, replace -`CONTROL_PLANE_IPS` with the IP addresses of the other control plane nodes. - -```sh -USER=ubuntu # customizable -CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" -for host in ${CONTROL_PLANE_IPS}; do - scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt - scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key - scp /etc/kubernetes/admin.conf "${USER}"@$host: -done -``` +1. Add your SSH identity to the session: -{{< note >}} -**Note**: Remember that your config may differ from this example. -{{< /note >}} + ``` + ssh-add ~/.ssh/path_to_private_key + ``` + +1. SSH between nodes to check that the connection is working correctly. -### Add the second stacked control plane node + - When you SSH to any node, make sure to add the `-A` flag: -1. Create a second, different `kubeadm-config.yaml` template file: + ``` + ssh -A 10.0.0.7 + ``` + + - When using sudo on any node, make sure to preserve the environment so SSH + forwarding works: + + ``` + sudo -E -s + ``` + +## Stacked control plane and etcd nodes + +### Steps for the first control plane node + +1. 
On the first control plane node, create a configuration file called `kubeadm-config.yaml`: apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration @@ -232,128 +166,68 @@ done certSANs: - "LOAD_BALANCER_DNS" controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - local: - extraArgs: - listen-client-urls: "https://127.0.0.1:2379,https://CP1_IP:2379" - advertise-client-urls: "https://CP1_IP:2379" - listen-peer-urls: "https://CP1_IP:2380" - initial-advertise-peer-urls: "https://CP1_IP:2380" - initial-cluster: "CP0_HOSTNAME=https://CP0_IP:2380,CP1_HOSTNAME=https://CP1_IP:2380" - initial-cluster-state: existing - serverCertSANs: - - CP1_HOSTNAME - - CP1_IP - peerCertSANs: - - CP1_HOSTNAME - - CP1_IP - networking: - # This CIDR is a calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate values for your cluster: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `CP0_HOSTNAME` - - `CP0_IP` - - `CP1_HOSTNAME` - - `CP1_IP` - -1. Move the copied files to the correct locations: + + - `kubernetesVersion` should be set to the Kubernetes version to use. This + example uses `stable`. + - `controlPlaneEndpoint` should match the address or DNS and port of the load balancer. + - It's recommended that the versions of kubeadm, kubelet, kubectl and Kubernetes match. + +1. Make sure that the node is in a clean state: ```sh - USER=ubuntu # customizable - mkdir -p /etc/kubernetes/pki/etcd - mv /home/${USER}/ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/ca.key /etc/kubernetes/pki/ - mv /home/${USER}/sa.pub /etc/kubernetes/pki/ - mv /home/${USER}/sa.key /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ - mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt - mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key - mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf + sudo kubeadm init --config=kubeadm-config.yaml + ``` + + You should see something like: + + ```sh + ... + You can now join any number of machines by running the following on each node + as root: + + kubeadm join 192.168.0.200:6443 --token j04n3m.octy8zely83cy2ts --discovery-token-ca-cert-hash sha256:84938d2a22203a8e56a787ec0c6ddad7bc7dbd52ebabc62fd5f4dbea72b14d1f ``` -1. Run the kubeadm phase commands to bootstrap the kubelet: +1. Copy this output to a text file. You will need it later to join other control plane nodes to the + cluster. + +1. Apply the Weave CNI plugin: ```sh - kubeadm alpha phase certs all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml - kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml - kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml - systemctl start kubelet + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" ``` -1. Run the commands to add the node to the etcd cluster: +1. 
Type the following and watch the pods of the components get started: ```sh - export CP0_IP=10.0.0.7 - export CP0_HOSTNAME=cp0 - export CP1_IP=10.0.0.8 - export CP1_HOSTNAME=cp1 - - export KUBECONFIG=/etc/kubernetes/admin.conf - kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP1_HOSTNAME} https://${CP1_IP}:2380 - kubeadm alpha phase etcd local --config kubeadm-config.yaml + kubectl get pod -n kube-system -w ``` - - This command causes the etcd cluster to become unavailable for a - brief period, after the node is added to the running cluster, and before the - new node is joined to the etcd cluster. + - It's recommended that you join new control plane nodes only after the first node has finished initializing. -1. Deploy the control plane components and mark the node as a master: +1. Copy the certificate files from the first control plane node to the rest: + In the following example, replace `CONTROL_PLANE_IPS` with the IP addresses of the + other control plane nodes. ```sh - kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml - kubeadm alpha phase controlplane all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config annotate-cri --config kubeadm-config.yaml - kubeadm alpha phase mark-master --config kubeadm-config.yaml + USER=ubuntu # customizable + CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" + for host in ${CONTROL_PLANE_IPS}; do + scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt + scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key + scp /etc/kubernetes/admin.conf "${USER}"@$host: + done ``` -### Add the third stacked control plane node +### Steps for the rest of the control plane nodes -1. Create a third, different `kubeadm-config.yaml` template file: - - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - local: - extraArgs: - listen-client-urls: "https://127.0.0.1:2379,https://CP2_IP:2379" - advertise-client-urls: "https://CP2_IP:2379" - listen-peer-urls: "https://CP2_IP:2380" - initial-advertise-peer-urls: "https://CP2_IP:2380" - initial-cluster: "CP0_HOSTNAME=https://CP0_IP:2380,CP1_HOSTNAME=https://CP1_IP:2380,CP2_HOSTNAME=https://CP2_IP:2380" - initial-cluster-state: existing - serverCertSANs: - - CP2_HOSTNAME - - CP2_IP - peerCertSANs: - - CP2_HOSTNAME - - CP2_IP - networking: - # This CIDR is a calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate values for your cluster: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `CP0_HOSTNAME` - - `CP0_IP` - - `CP1_HOSTNAME` - - `CP1_IP` - - `CP2_HOSTNAME` - - `CP2_IP` - -1. Move the copied files to the correct locations: +1. Move the files created by the previous step where `scp` was used: ```sh USER=ubuntu # customizable @@ -369,81 +243,45 @@ done mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf ``` -1. 
Run the kubeadm phase commands to bootstrap the kubelet: - - ```sh - kubeadm alpha phase certs all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml - kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml - kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml - systemctl start kubelet - ``` + This process writes all the requested files in the `/etc/kubernetes` folder. -1. Run the commands to add the node to the etcd cluster: +1. Start `kubeadm` on this node: ```sh - export CP0_IP=10.0.0.7 - export CP0_HOSTNAME=cp0 - export CP2_IP=10.0.0.9 - export CP2_HOSTNAME=cp2 - - export KUBECONFIG=/etc/kubernetes/admin.conf - kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP2_HOSTNAME} https://${CP2_IP}:2380 - kubeadm alpha phase etcd local --config kubeadm-config.yaml + sudo kubeadm join 192.168.0.200:6443 --token j04n3m.octy8zely83cy2ts --discovery-token-ca-cert-hash sha256:84938d2a22203a8e56a787ec0c6ddad7bc7dbd52ebabc62fd5f4dbea72b14d1f --experimental-control-plane ``` -1. Deploy the control plane components and mark the node as a master: + - Notice that this is the command that was returned from running `kubeadm init` on the first node, with the addition of the `--experimental-control-plane` flag. This flag automates joining this control plane node to the cluster. + +1. Type the following and watch the pods of the components get started: ```sh - kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml - kubeadm alpha phase controlplane all --config kubeadm-config.yaml - kubeadm alpha phase kubelet config annotate-cri --config kubeadm-config.yaml - kubeadm alpha phase mark-master --config kubeadm-config.yaml + kubectl get pod -n kube-system -w ``` -## External etcd - -### Set up the cluster - -- Follow [these instructions](/docs/setup/independent/setup-ha-etcd-with-kubeadm/) - to set up the etcd cluster. - -#### Copy required files from an etcd node to all control plane nodes - -In the following example, replace `USER` and `CONTROL_PLANE_HOSTS` values with values -for your environment. +1. Repeat these steps for the rest of the control plane nodes. -```sh -# Make a list of required etcd certificate files -cat << EOF > etcd-pki-files.txt -/etc/kubernetes/pki/etcd/ca.crt -/etc/kubernetes/pki/apiserver-etcd-client.crt -/etc/kubernetes/pki/apiserver-etcd-client.key -EOF +## External etcd nodes -# create the archive -tar -czf etcd-pki.tar.gz -T etcd-pki-files.txt +### Set up the etcd cluster -# copy the archive to the control plane nodes -USER=ubuntu -CONTROL_PLANE_HOSTS="10.0.0.7 10.0.0.8 10.0.0.9" -for host in $CONTROL_PLANE_HOSTS; do - scp etcd-pki.tar.gz "${USER}"@$host: -done -``` +- Follow [these instructions](/docs/setup/independent/setup-ha-etcd-with-kubeadm/) + to set up the etcd cluster. ### Set up the first control plane node -1. Extract the etcd certificates +1. Copy the following files from any node from the etcd cluster to this node: - mkdir -p /etc/kubernetes/pki - tar -xzf etcd-pki.tar.gz -C /etc/kubernetes/pki --strip-components=3 + ```sh + export CONTROL_PLANE="ubuntu@10.0.0.7" + +scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}": + +scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}": + +scp /etc/kubernetes/pki/apiserver-etcd-client.key "${CONTROL_PLANE}": + ``` -1. 
Create a `kubeadm-config.yaml`: + - Replace the value of `CONTROL_PLANE` with the `user@host` of this machine. -{{< note >}} -**Note**: Optionally replace `stable` with a different version of Kubernetes, for example `v1.11.3`. -{{< /note >}} +1. Create a file called `kubeadm-config.yaml` with the following contents: apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration @@ -461,82 +299,38 @@ done caFile: /etc/kubernetes/pki/etcd/ca.crt certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key - networking: - # This CIDR is a calico default. Substitute or remove for your CNI provider. - podSubnet: "192.168.0.0/16" - -1. Replace the following variables in the template with the appropriate values for your cluster: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `ETCD_0_IP` - - `ETCD_1_IP` - - `ETCD_2_IP` - -1. Run `kubeadm init --config kubeadm-config.yaml` -1. Copy the output join commamnd. - -### Copy required files to the correct locations - -The following pki files were created during the `kubeadm init` step and must be shared with -all other control plane nodes. - -- `/etc/kubernetes/pki/ca.crt` -- `/etc/kubernetes/pki/ca.key` -- `/etc/kubernetes/pki/sa.key` -- `/etc/kubernetes/pki/sa.pub` -- `/etc/kubernetes/pki/front-proxy-ca.crt` -- `/etc/kubernetes/pki/front-proxy-ca.key` - -In the following example, replace the list of -`CONTROL_PLANE_IPS` values with the IP addresses of the other control plane nodes. - -```sh -# make a list of required kubernetes certificate files -cat << EOF > certificate_files.txt -/etc/kubernetes/pki/ca.crt -/etc/kubernetes/pki/ca.key -/etc/kubernetes/pki/sa.key -/etc/kubernetes/pki/sa.pub -/etc/kubernetes/pki/front-proxy-ca.crt -/etc/kubernetes/pki/front-proxy-ca.key -EOF - -# create the archive -tar -czf control-plane-certificates.tar.gz -T certificate_files.txt - -USER=ubuntu # customizable -CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" -for host in ${CONTROL_PLANE_IPS}; do - scp control-plane-certificates.tar.gz "${USER}"@$host: -done -``` - -### Set up the other control plane nodes - -1. Extract the required certificates - - mkdir -p /etc/kubernetes/pki - tar -xzf etcd-pki.tar.gz -C /etc/kubernetes/pki --strip-components 3 - tar -xzf control-plane-certificates.tar.gz -C /etc/kubernetes/pki --strip-components 3 - -1. Verify the location of the copied files. - Your `/etc/kubernetes` directory should look like this: - - - `/etc/kubernetes/pki/apiserver-etcd-client.crt` - - `/etc/kubernetes/pki/apiserver-etcd-client.key` - - `/etc/kubernetes/pki/ca.crt` - - `/etc/kubernetes/pki/ca.key` - - `/etc/kubernetes/pki/front-proxy-ca.crt` - - `/etc/kubernetes/pki/front-proxy-ca.key` - - `/etc/kubernetes/pki/sa.key` - - `/etc/kubernetes/pki/sa.pub` - - `/etc/kubernetes/pki/etcd/ca.crt` - -1. Run the copied `kubeadm join` command from above. Add the flag "--experimental-control-plane". - The final command will look something like this: - - kubeadm join ha.k8s.example.com:6443 --token 5ynki1.3erp9i3yo7gqg1nv --discovery-token-ca-cert-hash sha256:a00055bd8c710a9906a3d91b87ea02976334e1247936ac061d867a0f014ecd81 --experimental-control-plane + + - The difference between stacked etcd and external etcd here is that we are using the `external` field for `etcd` in the kubeadm config. In the case of the stacked etcd topology this is managed automatically. 
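+
+   - Optionally, before running `kubeadm init`, you can verify that the external etcd cluster is
+     reachable from this node. The following is only a sketch: it assumes `etcdctl` is installed and
+     that the certificate files copied above have been placed at the paths referenced in the
+     configuration; `ETCD_0_IP` is a placeholder.
+
+     ```sh
+     ETCDCTL_API=3 etcdctl \
+       --endpoints=https://ETCD_0_IP:2379 \
+       --cacert=/etc/kubernetes/pki/etcd/ca.crt \
+       --cert=/etc/kubernetes/pki/apiserver-etcd-client.crt \
+       --key=/etc/kubernetes/pki/apiserver-etcd-client.key \
+       member list
+     ```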
+ + - Replace the following variables in the template with the appropriate values for your cluster: + + - `LOAD_BALANCER_DNS` + - `LOAD_BALANCER_PORT` + - `ETCD_0_IP` + - `ETCD_1_IP` + - `ETCD_2_IP` + +1. Run `kubeadm init --config kubeadm-config.yaml` on this node. + +1. Write the join command that is returned to a text file for later use. + +1. Apply the Weave CNI plugin: + + ```sh + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` + +### Steps for the rest of the control plane nodes + +To add the rest of the control plane nodes, follow [these instructions](#steps-for-the-rest-of-the-control-plane-nodes). +The steps are the same as for the stacked etcd setup, with the exception that a local +etcd member is not created. + +To summarize: + +- Make sure the first control plane node is fully initialized. +- Copy certificates between the first control plane node and the other control plane nodes. +- Join each control plane node with the join command you saved to a text file, plus the `--experimental-control-plane` flag. ## Common tasks after bootstrapping control plane @@ -549,6 +343,6 @@ in the master configuration file. ### Install workers Each worker node can now be joined to the cluster with the command returned from any of the -`kubeadm init` commands. +`kubeadm init` commands. The flag `--experimental-control-plane` should not be added to worker nodes. {{% /capture %}} diff --git a/content/en/docs/setup/independent/kubelet-integration.md b/content/en/docs/setup/independent/kubelet-integration.md index cd56d6c4b437b..b77ae43be36d7 100644 --- a/content/en/docs/setup/independent/kubelet-integration.md +++ b/content/en/docs/setup/independent/kubelet-integration.md @@ -3,7 +3,7 @@ reviewers: - sig-cluster-lifecycle title: Configuring each kubelet in your cluster using kubeadm content_template: templates/concept -weight: 40 +weight: 80 --- {{% capture overview %}} diff --git a/content/en/docs/setup/independent/setup-ha-etcd-with-kubeadm.md b/content/en/docs/setup/independent/setup-ha-etcd-with-kubeadm.md index 76ccdbc4f1a26..3562d4fc1b248 100644 --- a/content/en/docs/setup/independent/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/setup/independent/setup-ha-etcd-with-kubeadm.md @@ -3,7 +3,7 @@ reviewers: - sig-cluster-lifecycle title: Set up a High Availability etcd cluster with kubeadm content_template: templates/task -weight: 60 +weight: 70 --- {{% capture overview %}} diff --git a/content/en/docs/setup/independent/troubleshooting-kubeadm.md b/content/en/docs/setup/independent/troubleshooting-kubeadm.md index 372240e5043ab..aab7cd2f86cc1 100644 --- a/content/en/docs/setup/independent/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/independent/troubleshooting-kubeadm.md @@ -1,7 +1,7 @@ --- title: Troubleshooting kubeadm content_template: templates/concept -weight: 70 +weight: 90 --- {{% capture overview %}} diff --git a/static/images/kubeadm/kubeadm-ha-topology-external-etcd.svg b/static/images/kubeadm/kubeadm-ha-topology-external-etcd.svg new file mode 100755 index 0000000000000..3a1d6ee027768 --- /dev/null +++ b/static/images/kubeadm/kubeadm-ha-topology-external-etcd.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/static/images/kubeadm/kubeadm-ha-topology-stacked-etcd.svg b/static/images/kubeadm/kubeadm-ha-topology-stacked-etcd.svg new file mode 100755 index 0000000000000..c7c0f701376ab --- /dev/null +++ b/static/images/kubeadm/kubeadm-ha-topology-stacked-etcd.svg @@ -0,0 +1 @@ + \ No newline at end of 
file