
Use strict mode for Container Linux Configs
* Enable terraform-provider-ct `strict` mode for parsing Container Linux Configs and snippets
* Fix Container Linux Config systemd unit syntax `enable` (old) to `enabled`
* Align with Fedora CoreOS which uses strict mode already
dghubble committed Jun 10, 2020
1 parent 8dc170b commit a287920
Showing 31 changed files with 115 additions and 90 deletions.
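The substance of the change is the same across modules: each `ct_config` data source swaps `pretty_print = false` for `strict = true`, and the Container Linux Config templates switch systemd units from the old `enable` key to `enabled`. As a rough sketch of the Terraform side (using the AWS controller module shown below as the example):

data "ct_config" "controller-ignitions" {
  count    = var.controller_count
  content  = data.template_file.controller-configs.*.rendered[count.index]
  strict   = true # reject invalid Container Linux Configs and snippets instead of warning
  snippets = var.controller_snippets
}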
11 changes: 11 additions & 0 deletions CHANGES.md
@@ -14,6 +14,17 @@ Notable changes between versions.
strategy (see [docs](https://typhoon.psdn.io/topics/security/#container-images))
* Update Calico from v3.14.0 to [v3.14.1](https://docs.projectcalico.org/v3.14/release-notes/)

+### Fedora CoreOS
+
+#### Azure
+
+* Use `strict` Fedora CoreOS Config (FCC) snippet parsing ([#755](https://github.com/poseidon/typhoon/pull/755))
+
+### Flatcar Linux
+
+* Use `strict` Container Linux Config (CLC) snippet parsing ([#755](https://github.com/poseidon/typhoon/pull/755))
+* Require `terraform-provider-ct` v0.4+, recommend v0.5+ (**action required**)
+
### Addons

* Update Prometheus from v2.18.1 to [v2.19.0-rc.0](https://github.com/prometheus/prometheus/releases/tag/v2.19.0-rc.0)
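For existing clusters, the **action required** entry above means the `ct` provider version pin must move to v0.4 or newer (v0.5+ recommended). A minimal sketch of the corresponding pin in a cluster's own Terraform configuration, assuming a provider setup along the lines Typhoon's docs describe (the exact block contents in a given config may differ):

# Hypothetical user-side config: declare the ct provider and require v0.5+.
provider "ct" {}

terraform {
  required_providers {
    ct = "~> 0.5"
  }
}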
9 changes: 5 additions & 4 deletions aws/container-linux/kubernetes/cl/controller.yaml
@@ -2,7 +2,7 @@
systemd:
units:
- name: etcd-member.service
-      enable: true
+      enabled: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
@@ -28,11 +28,11 @@ systemd:
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
- name: docker.service
-      enable: true
+      enabled: true
- name: locksmithd.service
mask: true
- name: wait-for-dns.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
@@ -46,7 +46,7 @@ systemd:
RequiredBy=kubelet.service
RequiredBy=etcd-member.service
- name: kubelet.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Kubelet
@@ -191,6 +191,7 @@ storage:
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
+      mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184
8 changes: 4 additions & 4 deletions aws/container-linux/kubernetes/controllers.tf
@@ -49,10 +49,10 @@ resource "aws_instance" "controllers" {

# Controller Ignition configs
data "ct_config" "controller-ignitions" {
-  count        = var.controller_count
-  content      = data.template_file.controller-configs.*.rendered[count.index]
-  pretty_print = false
-  snippets     = var.controller_snippets
+  count    = var.controller_count
+  content  = data.template_file.controller-configs.*.rendered[count.index]
+  strict   = true
+  snippets = var.controller_snippets
}

# Controller Container Linux configs
2 changes: 1 addition & 1 deletion aws/container-linux/kubernetes/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_version = "~> 0.12.6"
required_providers {
aws = "~> 2.23"
ct = "~> 0.3"
ct = "~> 0.4"
template = "~> 2.1"
null = "~> 2.1"
}
7 changes: 4 additions & 3 deletions aws/container-linux/kubernetes/workers/cl/worker.yaml
@@ -2,11 +2,11 @@
systemd:
units:
- name: docker.service
-      enable: true
+      enabled: true
- name: locksmithd.service
mask: true
- name: wait-for-dns.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
@@ -19,7 +19,7 @@ systemd:
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Kubelet
@@ -115,6 +115,7 @@ storage:
${kubeconfig}
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
+      mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184
6 changes: 3 additions & 3 deletions aws/container-linux/kubernetes/workers/workers.tf
@@ -71,9 +71,9 @@ resource "aws_launch_configuration" "worker" {

# Worker Ignition config
data "ct_config" "worker-ignition" {
-  content      = data.template_file.worker-config.rendered
-  pretty_print = false
-  snippets     = var.snippets
+  content  = data.template_file.worker-config.rendered
+  strict   = true
+  snippets = var.snippets
}

# Worker Container Linux config
9 changes: 5 additions & 4 deletions azure/container-linux/kubernetes/cl/controller.yaml
@@ -2,7 +2,7 @@
systemd:
units:
- name: etcd-member.service
-      enable: true
+      enabled: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
@@ -28,11 +28,11 @@ systemd:
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
- name: docker.service
-      enable: true
+      enabled: true
- name: locksmithd.service
mask: true
- name: wait-for-dns.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
@@ -46,7 +46,7 @@ systemd:
RequiredBy=kubelet.service
RequiredBy=etcd-member.service
- name: kubelet.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Kubelet
@@ -189,6 +189,7 @@ storage:
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
+      mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184
8 changes: 4 additions & 4 deletions azure/container-linux/kubernetes/controllers.tf
@@ -139,10 +139,10 @@ resource "azurerm_network_interface_backend_address_pool_association" "controlle

# Controller Ignition configs
data "ct_config" "controller-ignitions" {
-  count        = var.controller_count
-  content      = data.template_file.controller-configs.*.rendered[count.index]
-  pretty_print = false
-  snippets     = var.controller_snippets
+  count    = var.controller_count
+  content  = data.template_file.controller-configs.*.rendered[count.index]
+  strict   = true
+  snippets = var.controller_snippets
}

# Controller Container Linux configs
2 changes: 1 addition & 1 deletion azure/container-linux/kubernetes/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_version = "~> 0.12.6"
required_providers {
azurerm = "~> 2.8"
ct = "~> 0.3"
ct = "~> 0.4"
template = "~> 2.1"
null = "~> 2.1"
}
9 changes: 5 additions & 4 deletions azure/container-linux/kubernetes/workers/cl/worker.yaml
@@ -2,11 +2,11 @@
systemd:
units:
- name: docker.service
-      enable: true
+      enabled: true
- name: locksmithd.service
mask: true
- name: wait-for-dns.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
@@ -19,7 +19,7 @@ systemd:
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Kubelet
@@ -92,7 +92,7 @@ systemd:
[Install]
WantedBy=multi-user.target
- name: delete-node.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Waiting to delete Kubernetes node on shutdown
@@ -113,6 +113,7 @@ storage:
${kubeconfig}
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
+      mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184
6 changes: 3 additions & 3 deletions azure/container-linux/kubernetes/workers/workers.tf
@@ -97,9 +97,9 @@ resource "azurerm_monitor_autoscale_setting" "workers" {

# Worker Ignition configs
data "ct_config" "worker-ignition" {
-  content      = data.template_file.worker-config.rendered
-  pretty_print = false
-  snippets     = var.snippets
+  content  = data.template_file.worker-config.rendered
+  strict   = true
+  snippets = var.snippets
}

# Worker Container Linux configs
8 changes: 4 additions & 4 deletions azure/fedora-coreos/kubernetes/controllers.tf
@@ -113,10 +113,10 @@ resource "azurerm_network_interface_backend_address_pool_association" "controlle

# Controller Ignition configs
data "ct_config" "controller-ignitions" {
-  count        = var.controller_count
-  content      = data.template_file.controller-configs.*.rendered[count.index]
-  pretty_print = false
-  snippets     = var.controller_snippets
+  count    = var.controller_count
+  content  = data.template_file.controller-configs.*.rendered[count.index]
+  strict   = true
+  snippets = var.controller_snippets
}

# Controller Fedora CoreOS configs
2 changes: 1 addition & 1 deletion azure/fedora-coreos/kubernetes/versions.tf
@@ -4,7 +4,7 @@ terraform {
required_version = "~> 0.12.6"
required_providers {
azurerm = "~> 2.8"
ct = "~> 0.3"
ct = "~> 0.4"
template = "~> 2.1"
null = "~> 2.1"
}
6 changes: 3 additions & 3 deletions azure/fedora-coreos/kubernetes/workers/workers.tf
@@ -72,9 +72,9 @@ resource "azurerm_monitor_autoscale_setting" "workers" {

# Worker Ignition configs
data "ct_config" "worker-ignition" {
-  content      = data.template_file.worker-config.rendered
-  pretty_print = false
-  snippets     = var.snippets
+  content  = data.template_file.worker-config.rendered
+  strict   = true
+  snippets = var.snippets
}

# Worker Fedora CoreOS configs
10 changes: 6 additions & 4 deletions bare-metal/container-linux/kubernetes/cl/controller.yaml
@@ -2,7 +2,7 @@
systemd:
units:
- name: etcd-member.service
-      enable: true
+      enabled: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
@@ -28,11 +28,11 @@ systemd:
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
- name: docker.service
-      enable: true
+      enabled: true
- name: locksmithd.service
mask: true
- name: kubelet.path
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Watch for kubeconfig
@@ -41,7 +41,7 @@ systemd:
[Install]
WantedBy=multi-user.target
- name: wait-for-dns.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
@@ -161,6 +161,7 @@ storage:
directories:
- path: /etc/kubernetes
filesystem: root
+      mode: 0755
files:
- path: /etc/hostname
filesystem: root
@@ -207,6 +208,7 @@ storage:
done
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
+      mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184
2 changes: 1 addition & 1 deletion bare-metal/container-linux/kubernetes/cl/install.yaml
@@ -2,7 +2,7 @@
systemd:
units:
- name: installer.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Requires=network-online.target
8 changes: 5 additions & 3 deletions bare-metal/container-linux/kubernetes/cl/worker.yaml
@@ -2,11 +2,11 @@
systemd:
units:
- name: docker.service
-      enable: true
+      enabled: true
- name: locksmithd.service
mask: true
- name: kubelet.path
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Watch for kubeconfig
@@ -15,7 +15,7 @@ systemd:
[Install]
WantedBy=multi-user.target
- name: wait-for-dns.service
-      enable: true
+      enabled: true
contents: |
[Unit]
Description=Wait for DNS entries
@@ -114,6 +114,7 @@ storage:
directories:
- path: /etc/kubernetes
filesystem: root
+      mode: 0755
files:
- path: /etc/hostname
filesystem: root
@@ -123,6 +124,7 @@ storage:
${domain_name}
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
+      mode: 0644
contents:
inline: |
fs.inotify.max_user_watches=16184