diff --git a/README.md b/README.md
index fe21ba8..5306dea 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ Current features:
 * Automatic backup and recovery. So if your master gets terminated, when the replacement is provisioned by AWS it will pick up where the old one left off without you doing anything. 😁
 * Completely automated provisioning through Terraform and Bash.
-* Variables for many things including number of workers (requested through an auto-scaling group) and EC2 instance type.
+* Variables for many things including number of workers (provisioned using an auto-scaling group) and EC2 instance type.
 * [External DNS](https://github.com/kubernetes-incubator/external-dns) and [Nginx Ingress](https://github.com/kubernetes/ingress-nginx) as a cheap ELB alternative.
 * Auto Scaling of worker nodes, if you enable the [Cluster AutoScaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler).
 * Persistent Volumes using GP2 storage on EBS.
@@ -22,7 +22,7 @@ Current features:
 2. [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
 3. Generate token: `python -c 'import random; print "%0x.%0x" % (random.SystemRandom().getrandbits(3*8), random.SystemRandom().getrandbits(8*8))' > token.txt`
 4. Make an SSH key on us-east-1 from the AWS console
-5. Run terraform plan: `terraform plan -var k8s-ssh-key=<aws-ssh-key-name> -var k8stoken=$(cat token.txt) -var admin-cidr-blocks="<my-ip-address>/32"`
+5. Run terraform plan: `terraform plan -var k8s-ssh-key=<aws-ssh-key-name> -var k8stoken=$(cat token.txt) -var admin-cidr-blocks="<my-ip-address>/32" -var nginx-ingress-domain="ingress.mydomain.com"`
 6. Build out infrastructure: `terraform apply -var k8s-ssh-key=<aws-ssh-key-name> -var k8stoken=$(cat token.txt) -var admin-cidr-blocks="<my-ip-address>/32"`
 7. SSH to K8S master and run something: `ssh ubuntu@$(terraform output master_dns) -i <aws-ssh-key-name>.pem kubectl get no`
 10. Done!
@@ -32,7 +32,7 @@ Optional Variables:
 * `min-worker-count` - The minimum size of the worker node Auto-Scaling Group (1 by default)
 * `max-worker-count` - The maximum size of the worker node Auto-Scaling Group (1 by default)
 * `region` - Which AWS region to use (us-east-1 by default)
-* `kubernetes-version` - Which Kubernetes/kubeadm version to install (1.11.5 by default)
+* `kubernetes-version` - Which Kubernetes/kubeadm version to install (1.13.1 by default)
 * `master-instance-type` - Which EC2 instance type to use for the master node (m1.small by default)
 * `master-spot-price` - The maximum spot bid for the master node ($0.01 by default)
 * `worker-instance-type` - Which EC2 instance type to use for the worker nodes (m1.small by default)
@@ -40,10 +40,17 @@ Optional Variables:
 * `cluster-name` - Used for naming the created AWS resources (k8s by default)
 * `backup-enabled` - Set to "0" to disable the automatic etcd backups (1 by default)
 * `backup-cron-expression` - A cron expression to use for the automatic etcd backups (`*/15 * * * *` by default)
-* `external-dns-enabled` - Set to "0" to disable ExternalDNS (1 by default)
+* `external-dns-enabled` - Set to "0" to disable ExternalDNS (1 by default) - Existing Route 53 Domain required
 * `nginx-ingress-enabled` - Set to "0" to disable Nginx Ingress (1 by default)
+* `nginx-ingress-domain` - The DNS name to map to Nginx Ingress using External DNS ("" by default)
 * `cluster-autoscaler-enabled` - Set to "1" to enable the cluster autoscaler (0 by default)
 
+### Ingress Notes
+
+As hinted above, this uses Nginx Ingress as an alternative to a Load Balancer. This is done by exposing ports 443 and 80 directly on each of the nodes (Workers and the Master) using a NodePort type Service. Unfortunately External DNS doesn't seem to work with Nginx Ingress when you expose it in this way, so I've had to just map a single DNS name (using the nginx-ingress-domain variable) to the NodePort service itself. External DNS will keep that entry up to date with the IPs of the nodes in the cluster; you will then have to manually add CNAME entries for your individual services.
+
+I am well aware that this isn't the most secure way of exposing services, but it's secure enough for my purposes. If anyone has any suggestions on a better way of doing this without shelling out $20 a month for an ELB, please open an Issue!
+
 ### Contributing
 
 I've written this as a personal project and will do my best to maintain it to a good standard, despite having very limited free time. I very much welcome contributions in the form of Pull Requests and Issues (for both bugs and feature requests).
diff --git a/main.tf b/main.tf
index 7f4d638..14c00b5 100644
--- a/main.tf
+++ b/main.tf
@@ -196,6 +196,21 @@ resource "aws_s3_bucket_object" "nginx-ingress-manifest" {
   etag = "${md5(file("manifests/nginx-ingress-mandatory.yaml"))}"
 }
 
+data "template_file" "nginx-ingress-nodeport-manifest" {
+  count = "${var.nginx-ingress-enabled}"
+  template = "${file("manifests/nginx-ingress-nodeport.yaml.tmpl")}"
+  vars {
+    nginx_ingress_domain = "${var.nginx-ingress-domain}"
+  }
+}
+
+resource "aws_s3_bucket_object" "nginx-ingress-nodeport-manifest" {
+  count = "${var.nginx-ingress-enabled}"
+  bucket = "${aws_s3_bucket.s3-bucket.id}"
+  key = "manifests/nginx-ingress-nodeport.yaml"
+  content = "${data.template_file.nginx-ingress-nodeport-manifest.rendered}"
+  etag = "${md5(data.template_file.nginx-ingress-nodeport-manifest.rendered)}"
+}
+
 data "template_file" "cluster-autoscaler-manifest" {
   template = "${file("manifests/cluster-autoscaler-autodiscover.yaml.tmpl")}"
   vars {
diff --git a/manifests/external-dns.yaml b/manifests/external-dns.yaml
index a1c8ff0..752f837 100644
--- a/manifests/external-dns.yaml
+++ b/manifests/external-dns.yaml
@@ -53,9 +53,8 @@ spec:
       - name: external-dns
         image: registry.opensource.zalan.do/teapot/external-dns:v0.5.8
         args:
-        - --source=ingress
+        - --source=service
         - --provider=aws
         - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
         - --registry=txt
         - --txt-owner-id=k8s
-        - --log-level=debug
diff --git a/manifests/nginx-ingress-nodeport.yaml b/manifests/nginx-ingress-nodeport.yaml.tmpl
similarity index 84%
rename from manifests/nginx-ingress-nodeport.yaml
rename to manifests/nginx-ingress-nodeport.yaml.tmpl
index dd82ed3..77eff27 100644
--- a/manifests/nginx-ingress-nodeport.yaml
+++ b/manifests/nginx-ingress-nodeport.yaml.tmpl
@@ -6,6 +6,8 @@ metadata:
   labels:
     app.kubernetes.io/name: ingress-nginx
     app.kubernetes.io/part-of: ingress-nginx
+  annotations:
+    external-dns.alpha.kubernetes.io/hostname: ${nginx_ingress_domain}
 spec:
   type: NodePort
   ports:
diff --git a/master.sh b/master.sh
index 707b794..1561583 100644
--- a/master.sh
+++ b/master.sh
@@ -92,10 +92,10 @@ if [ $(aws s3 ls s3://${s3bucket}/etcd-backups/ | wc -l) -ne 0 ]; then
   mv default.etcd/member /var/lib/etcd/
 
   echo "Running kubeadm init"
-  kubeadm init --ignore-preflight-errors=DirAvailable--var-lib-etcd --config=init-config.yaml
+  kubeadm init --ignore-preflight-errors="DirAvailable--var-lib-etcd,NumCPU" --config=init-config.yaml
 else
   echo "Running kubeadm init"
-  kubeadm init --config=init-config.yaml
+  kubeadm init --config=init-config.yaml --ignore-preflight-errors=NumCPU
   touch /tmp/fresh-cluster
 fi
 
@@ -111,7 +111,7 @@ if [ -f /tmp/fresh-cluster ]; then
   su -c 'kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/13a990bb716c82a118b8e825b78189dcfbfb2f1e/Documentation/kube-flannel.yml' ubuntu
   mkdir /tmp/manifests
   aws s3 sync s3://${s3bucket}/manifests/ /tmp/manifests
-  su -c 'kubectl apply -n kube-system -f /tmp/manifests/' ubuntu
+  su -c 'kubectl apply -f /tmp/manifests/' ubuntu
 fi
 
 # Set up backups if they have been enabled
diff --git a/variables.tf b/variables.tf
index 235a874..040dfe1 100644
--- a/variables.tf
+++ b/variables.tf
@@ -49,7 +49,7 @@ variable "region" {
 }
 
 variable "kubernetes-version" {
-  default = "1.12.2"
+  default = "1.13.1"
   description = "Which version of Kubernetes to install"
 }
 
@@ -103,6 +103,11 @@ variable "nginx-ingress-enabled" {
   description = "Whether or not to enable nginx ingress. (1 for enabled, 0 for disabled)"
 }
 
+variable "nginx-ingress-domain" {
+  default = ""
+  description = "The DNS name to map to Nginx Ingress (using External DNS)"
+}
+
 variable "cluster-autoscaler-enabled" {
   default = "0"
   description = "Whether or not to enable the cluster autoscaler. (1 for enabled, 0 for disabled)"