Skip to content
This repository has been archived by the owner on Jan 9, 2023. It is now read-only.

Use core-dns over kube-dns for clusters >= 1.10 #715

Merged
merged 5 commits into from
Feb 7, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
325 changes: 97 additions & 228 deletions puppet/modules/kubernetes/Gemfile.lock

Large diffs are not rendered by default.

5 changes: 0 additions & 5 deletions puppet/modules/kubernetes/manifests/apply.pp
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,6 @@
$manifests = [],
$force = false,
$format = 'yaml',
$systemd_wants = [],
$systemd_requires = [],
$systemd_after = [],
$systemd_before = [],
Enum['manifests','concat'] $type = 'manifests',
){
require ::kubernetes
Expand Down Expand Up @@ -73,6 +69,5 @@
refreshonly => true,
command => $command,
require => [ Service[$service_apiserver], Service[$service_kube_addon_manager] ],
logoutput => true,
}
}
12 changes: 12 additions & 0 deletions puppet/modules/kubernetes/manifests/delete.pp
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Deletes a resource from a kubernetes master by removing its manifest
# file from the apply directory (counterpart to kubernetes::apply; the
# resource title is the manifest's base name).
define kubernetes::delete(
  # Manifest file extension; must match the $format the resource was
  # originally applied with by kubernetes::apply.
  $format = 'yaml',
){
  require ::kubernetes

  # Same path that kubernetes::apply writes to; ensuring the file is
  # absent stops the resource from being re-applied.
  # NOTE(review): removal of the live object from the cluster presumably
  # relies on the addon manager pruning unreferenced manifests - verify.
  $apply_file = "${::kubernetes::apply_dir}/${name}.${format}"

  file {$apply_file:
    ensure => absent,
  }
}
38 changes: 31 additions & 7 deletions puppet/modules/kubernetes/manifests/dns.pp
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,38 @@
$version_before_1_6 = true
}

kubernetes::apply{'kube-dns':
manifests => [
$post_1_10 = versioncmp($::kubernetes::version, '1.10.0') >= 0

if $post_1_10 {
$app_name = 'core-dns'
$delete_app_name = 'kube-dns'
$label_name = 'CoreDNS'

$manifests = [
template('kubernetes/core-dns-config-map.yaml.erb'),
template('kubernetes/core-dns-deployment.yaml.erb'),
]

} else {
$app_name = 'kube-dns'
$delete_app_name = 'core-dns'
$label_name = 'KubeDNS'

$manifests = [
template('kubernetes/kube-dns-config-map.yaml.erb'),
template('kubernetes/kube-dns-service-account.yaml.erb'),
template('kubernetes/kube-dns-deployment.yaml.erb'),
template('kubernetes/kube-dns-svc.yaml.erb'),
template('kubernetes/kube-dns-horizontal-autoscaler-deployment.yaml.erb'),
template('kubernetes/kube-dns-horizontal-autoscaler-rbac.yaml.erb'),
],
]
}

kubernetes::apply{$app_name:
manifests => concat(
$manifests,
template('kubernetes/dns-service-account.yaml.erb'),
template('kubernetes/dns-svc.yaml.erb'),
template('kubernetes/dns-horizontal-autoscaler-deployment.yaml.erb'),
template('kubernetes/dns-horizontal-autoscaler-rbac.yaml.erb'),
template('kubernetes/dns-cluster-role.yaml.erb'),
template('kubernetes/dns-cluster-role-binding.yaml.erb'),
),
} -> kubernetes::delete{$delete_app_name:}
}
58 changes: 53 additions & 5 deletions puppet/modules/kubernetes/spec/classes/dns_spec.rb
Original file line number Diff line number Diff line change
@@ -1,13 +1,61 @@
require 'spec_helper'

# Specs for kubernetes::dns. Clusters below 1.10 get the kube-dns
# manifests; clusters at or above 1.10 get core-dns instead (the diff
# rendering had left a stray duplicate `context` line at the start of
# the second context, producing unbalanced `end`s - removed here).
describe 'kubernetes::dns' do
  context 'with default values for all parameters' do
    # Stub out kubernetes::apply / kubernetes::delete so compilation only
    # exercises manifest generation and addon-manager label handling.
    let(:pre_condition) do
      "
      class{'kubernetes': version => '1.9.0'}
      define kubernetes::apply(
        $manifests,
      ){
        kubernetes::addon_manager_labels($manifests[0])
      }
      define kubernetes::delete(){}
      "
    end

    # Manifests passed to the kube-dns kubernetes::apply resource.
    let(:manifests) do
      catalogue.resource('Kubernetes::Apply', 'kube-dns').send(:parameters)[:manifests]
    end

    it { should contain_class('kubernetes::dns') }

    it 'be valid yaml' do
      manifests.each do |manifest|
        YAML.parse manifest
      end
    end

    it 'should write manifests' do
      expect(manifests.join('\n---\n')).to match(%{--domain=cluster\.local\.})
      expect(manifests.join('\n---\n')).to match(%{clusterIP: 10\.254\.0\.10})
    end
  end

  context 'with version 1.11' do
    let(:pre_condition) do
      "
      class{'kubernetes': version => '1.11.0'}
      define kubernetes::apply(
        $manifests,
      ){
        kubernetes::addon_manager_labels($manifests[0])
      }
      define kubernetes::delete(){}
      "
    end

    # Manifests passed to the core-dns kubernetes::apply resource.
    let(:manifests) do
      catalogue.resource('Kubernetes::Apply', 'core-dns').send(:parameters)[:manifests]
    end

    it 'be valid yaml' do
      manifests.each do |manifest|
        YAML.parse manifest
      end
    end

    it { should contain_class('kubernetes::dns') }
  end
end
23 changes: 23 additions & 0 deletions puppet/modules/kubernetes/templates/core-dns-config-map.yaml.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# ConfigMap holding the CoreDNS Corefile; DNS addon for clusters >= 1.10.
# (A PR review comment had been scraped into the middle of the Corefile;
# removed so the template renders valid YAML again.)
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes <%= @cluster_domain %> in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        reload
        loadbalance
    }
88 changes: 88 additions & 0 deletions puppet/modules/kubernetes/templates/core-dns-deployment.yaml.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
# Deployment for CoreDNS (DNS addon for clusters >= 1.10). Structural
# indentation reconstructed; key order and values preserved from source.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: core-dns
  namespace: kube-system
  labels:
    k8s-app: core-dns
    kubernetes.io/name: "CoreDNS"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: core-dns
  template:
    metadata:
      labels:
        k8s-app: core-dns
      annotations:
        # Port 9153 matches the `prometheus :9153` plugin in the Corefile.
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9153'
        scheduler.alpha.kubernetes.io/critical-pod: ''
<%- if @version_before_1_6 -%>
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
<%- end -%>
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      # Mounts the Corefile from the coredns ConfigMap.
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
      containers:
      - name: core-dns
        image: coredns/coredns:1.2.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          # /health:8080 is served by the `health` plugin in the Corefile.
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            # NET_BIND_SERVICE lets the non-root process bind port 53.
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
<%- if @rbac_enabled -%>
      serviceAccountName: core-dns
<%- end -%>
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# ClusterRoleBinding granting the DNS service account the
# system:<app_name> ClusterRole. @app_name is 'core-dns' or 'kube-dns'
# depending on cluster version (set in manifests/dns.pp).
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
    # NOTE(review): source order places this under annotations, but the
    # addon manager normally reads addonmanager.kubernetes.io/mode as a
    # LABEL (cf. dns-cluster-role.yaml.erb) - verify intended placement.
    addonmanager.kubernetes.io/mode: Reconcile
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:<%= @app_name %>
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:<%= @app_name %>
subjects:
- kind: ServiceAccount
  name: <%= @app_name %>
  namespace: kube-system
18 changes: 18 additions & 0 deletions puppet/modules/kubernetes/templates/dns-cluster-role.yaml.erb
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# ClusterRole allowing the DNS pod to list/watch the cluster state it
# serves records for. @app_name is 'core-dns' or 'kube-dns' depending
# on cluster version (set in manifests/dns.pp).
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:<%= @app_name %>
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,17 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-autoscaler
name: <%= @app_name %>-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
k8s-app: <%= @app_name %>-autoscaler
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
spec:
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
k8s-app: <%= @app_name %>-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
<%- if @version_before_1_6 -%>
Expand All @@ -42,9 +42,9 @@ spec:
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --configmap=<%= @app_name %>-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
- --target=Deployment/<%= @app_name %>
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":<%= @min_replicas %>,"preventSinglePointFailure":true}}
Expand All @@ -56,5 +56,5 @@ spec:
operator: "Exists"
<%- end -%>
<%- if @rbac_enabled -%>
serviceAccountName: kube-dns-autoscaler
serviceAccountName: <%= @app_name %>-autoscaler
<%- end -%>
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: kube-dns-autoscaler
name: <%= @app_name %>-autoscaler
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
Expand All @@ -28,7 +28,7 @@ apiVersion: rbac.authorization.k8s.io/v1alpha1
apiVersion: rbac.authorization.k8s.io/v1beta1
<%- end -%>
metadata:
name: system:kube-dns-autoscaler
name: system:<%= @app_name %>-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
Expand All @@ -54,15 +54,15 @@ apiVersion: rbac.authorization.k8s.io/v1alpha1
apiVersion: rbac.authorization.k8s.io/v1beta1
<%- end -%>
metadata:
name: system:kube-dns-autoscaler
name: system:<%= @app_name %>-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
name: <%= @app_name %>-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
name: system:<%= @app_name %>-autoscaler
apiGroup: rbac.authorization.k8s.io
<%- end -%>
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
name: <%= @app_name %>
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,13 @@ metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
k8s-app: <%= @app_name %>
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
kubernetes.io/name: "<%= @label_name %>"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
k8s-app: kube-dns
k8s-app: <%= @app_name %>
clusterIP: <%= @cluster_dns %>
ports:
- name: dns
Expand Down
Loading