From 615b2011359164cafd6898304438fe6d0c62ebaa Mon Sep 17 00:00:00 2001
From: Simon Gerber
Date: Tue, 9 Jul 2024 10:06:25 +0200
Subject: [PATCH 1/5] Move `isOpenshift` check to `util.libsonnet`

---
 component/helm-namespace.jsonnet | 6 +++---
 component/util.libsonnet        | 6 ++++++
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/component/helm-namespace.jsonnet b/component/helm-namespace.jsonnet
index d874cef4..b92e4a9c 100644
--- a/component/helm-namespace.jsonnet
+++ b/component/helm-namespace.jsonnet
@@ -1,14 +1,14 @@
 // main template for cilium
 local kap = import 'lib/kapitan.libjsonnet';
 local kube = import 'lib/kube.libjsonnet';
+local util = import 'util.libsonnet';
+
 local inv = kap.inventory();
 // The hiera parameters for the component
 local params = inv.parameters.cilium;
 
-local isOpenshift = std.startsWith(inv.parameters.facts.distribution, 'openshift');
-
 local additionalOpenshiftMeta =
-  if isOpenshift then
+  if util.isOpenshift then
     {
       labels+: {
         'openshift.io/cluster-logging': 'true',
diff --git a/component/util.libsonnet b/component/util.libsonnet
index dbf2fb77..cadd697a 100644
--- a/component/util.libsonnet
+++ b/component/util.libsonnet
@@ -1,3 +1,8 @@
+local kap = import 'lib/kapitan.libjsonnet';
+
+local inv = kap.inventory();
+local isOpenshift = std.member([ 'openshift4', 'oke' ], inv.parameters.facts.distribution);
+
 local parse_version(ver) =
   local verparts = std.split(ver, '.');
   local parseOrError(val, typ) =
@@ -16,5 +21,6 @@ local parse_version(ver) =
   };
 
 {
+  isOpenshift: isOpenshift,
   parse_version: parse_version,
 }

From a3c36c57e481a52d98103b332d408aa20e6edbe1 Mon Sep 17 00:00:00 2001
From: Simon Gerber
Date: Tue, 9 Jul 2024 10:06:41 +0200
Subject: [PATCH 2/5] Ensure that OpenShift doesn't deploy kube-proxy when `kubeProxyReplacement=true/strict`

To ensure that kube-proxy is deployed correctly if users switch back to
`kubeProxyReplacement=false`, we always deploy the Patch resource which
configures the `network.operator.openshift.io/cluster` resource.
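
For illustration, the rendered Patch (compare the golden output for the new
`kubeproxyreplacement-strict` test case below) amounts to a merge patch on the
cluster network operator configuration along these lines:

    # target: Network "cluster" in operator.openshift.io/v1
    spec:
      deployKubeProxy: false  # set to true for any other kubeProxyReplacement value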
---
 class/cilium.yml                        | 2 ++
 component/ocp-manage-kube-proxy.jsonnet | 30 +++++++++++++++++++++++++
 2 files changed, 32 insertions(+)
 create mode 100644 component/ocp-manage-kube-proxy.jsonnet

diff --git a/class/cilium.yml b/class/cilium.yml
index 9d9d8785..8457a91f 100644
--- a/class/cilium.yml
+++ b/class/cilium.yml
@@ -26,6 +26,7 @@ parameters:
           - ${_base_directory}/component/aggregated-clusterroles.jsonnet
           - ${_base_directory}/component/egress-gateway-policies.jsonnet
           - ${_base_directory}/component/bgp-control-plane.jsonnet
+          - ${_base_directory}/component/ocp-manage-kube-proxy.jsonnet
         input_type: jsonnet
         output_path: ${_instance}/
 
@@ -51,6 +52,7 @@ parameters:
           - ${_base_directory}/component/aggregated-clusterroles.jsonnet
           - ${_base_directory}/component/egress-gateway-policies.jsonnet
           - ${_base_directory}/component/bgp-control-plane.jsonnet
+          - ${_base_directory}/component/ocp-manage-kube-proxy.jsonnet
         input_type: jsonnet
         output_path: ${_instance}/
       - input_paths:
diff --git a/component/ocp-manage-kube-proxy.jsonnet b/component/ocp-manage-kube-proxy.jsonnet
new file mode 100644
index 00000000..83849da7
--- /dev/null
+++ b/component/ocp-manage-kube-proxy.jsonnet
@@ -0,0 +1,30 @@
+local kap = import 'lib/kapitan.libjsonnet';
+local kube = import 'lib/kube.libjsonnet';
+local po = import 'lib/patch-operator.libsonnet';
+local util = import 'util.libsonnet';
+
+local inv = kap.inventory();
+local params = inv.parameters.cilium;
+
+local fullReplacement = std.member(
+  [ 'strict', 'true' ],
+  params.cilium_helm_values.kubeProxyReplacement
+);
+
+
+local target = kube._Object('operator.openshift.io/v1', 'Network', 'cluster');
+
+local template = {
+  spec: {
+    deployKubeProxy: !fullReplacement,
+  },
+};
+
+local patch = po.Patch(target, template, patchstrategy='application/merge-patch+json');
+
+if util.isOpenshift then
+  {
+    '99_networkoperator_kube_proxy_patch': patch,
+  }
+else
+  {}

From c28805c1c0431d9feec1843acd68581c794c7f60 Mon Sep 17 00:00:00 2001
From: Simon Gerber
Date: Tue, 9 Jul 2024 10:10:41 +0200
Subject: [PATCH 3/5] Add test case for `kubeProxyReplacement=true` on OpenShift

Rendered from template version: main (2ae1bc3)
---
 .cruft.json                                   | 4 +-
 .github/workflows/test.yaml                   | 2 +
 Makefile.vars.mk                              | 2 +-
 .../cilium/apps/cilium.yaml                   | 0
 .../00_cilium_namespace.yaml                  | 11 +
 .../templates/cilium-agent/clusterrole.yaml   | 104 +++++
 .../cilium-agent/clusterrolebinding.yaml      | 14 +
 .../templates/cilium-agent/daemonset.yaml     | 410 ++++++++++++++++++
 .../cilium/templates/cilium-agent/role.yaml   | 16 +
 .../templates/cilium-agent/rolebinding.yaml   | 15 +
 .../templates/cilium-agent/service.yaml       | 23 +
 .../cilium-agent/serviceaccount.yaml          | 5 +
 .../cilium-agent/servicemonitor.yaml          | 27 ++
 .../cilium/templates/cilium-configmap.yaml    | 110 +++++
 .../cilium-operator/clusterrole.yaml          | 190 ++++++++
 .../cilium-operator/clusterrolebinding.yaml   | 14 +
 .../templates/cilium-operator/deployment.yaml | 108 +++++
 .../cilium-operator/serviceaccount.yaml       | 5 +
 .../templates/cilium-secrets-namespace.yaml   | 2 +
 .../templates/hubble-relay/configmap.yaml     | 10 +
 .../templates/hubble-relay/deployment.yaml    | 79 ++++
 .../templates/hubble-relay/service.yaml       | 17 +
 .../hubble-relay/serviceaccount.yaml          | 5 +
 .../cilium/templates/hubble/peer-service.yaml | 18 +
 .../cilium/02_aggregated_clusterroles.yaml    | 67 +++
 .../99_networkoperator_kube_proxy_patch.yaml  | 22 +
 tests/kubeproxyreplacement-strict.yml         | 18 +
 27 files changed, 1295 insertions(+), 3 deletions(-)
 create mode 100644
tests/golden/kubeproxyreplacement-strict/cilium/apps/cilium.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/00_cilium_namespace.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrole.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrolebinding.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/daemonset.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/role.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/rolebinding.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/service.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/serviceaccount.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/servicemonitor.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-configmap.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrole.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrolebinding.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/deployment.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/serviceaccount.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-secrets-namespace.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/configmap.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/deployment.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/service.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/serviceaccount.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble/peer-service.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/02_aggregated_clusterroles.yaml create mode 100644 tests/golden/kubeproxyreplacement-strict/cilium/cilium/99_networkoperator_kube_proxy_patch.yaml create mode 100644 tests/kubeproxyreplacement-strict.yml diff --git a/.cruft.json b/.cruft.json index bf247fd1..33305618 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,13 +1,13 @@ { "template": "https://github.com/projectsyn/commodore-component-template.git", - "commit": "26ee71e475cca036551c68a6c6b2285fe86139a0", + "commit": "2ae1bc3383f211eee5f20a963f5ac74725d85d5b", "checkout": "main", "context": { "cookiecutter": { "name": "Cilium", "slug": "cilium", 
"parameter_key": "cilium", - "test_cases": "defaults helm-opensource olm-opensource egress-gateway bgp-control-plane", + "test_cases": "defaults helm-opensource olm-opensource egress-gateway bgp-control-plane kubeproxyreplacement-strict", "add_lib": "n", "add_pp": "n", "add_golden": "y", diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 8f3dcb6e..e949ae93 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -37,6 +37,7 @@ jobs: - olm-opensource - egress-gateway - bgp-control-plane + - kubeproxyreplacement-strict defaults: run: working-directory: ${{ env.COMPONENT_NAME }} @@ -56,6 +57,7 @@ jobs: - olm-opensource - egress-gateway - bgp-control-plane + - kubeproxyreplacement-strict defaults: run: working-directory: ${{ env.COMPONENT_NAME }} diff --git a/Makefile.vars.mk b/Makefile.vars.mk index 7ce5e6e6..2e23b770 100644 --- a/Makefile.vars.mk +++ b/Makefile.vars.mk @@ -57,4 +57,4 @@ KUBENT_IMAGE ?= ghcr.io/doitintl/kube-no-trouble:latest KUBENT_DOCKER ?= $(DOCKER_CMD) $(DOCKER_ARGS) $(root_volume) --entrypoint=/app/kubent $(KUBENT_IMAGE) instance ?= defaults -test_instances = tests/defaults.yml tests/helm-opensource.yml tests/olm-opensource.yml tests/egress-gateway.yml tests/bgp-control-plane.yml +test_instances = tests/defaults.yml tests/helm-opensource.yml tests/olm-opensource.yml tests/egress-gateway.yml tests/bgp-control-plane.yml tests/kubeproxyreplacement-strict.yml diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/apps/cilium.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/apps/cilium.yaml new file mode 100644 index 00000000..e69de29b diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/00_cilium_namespace.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/00_cilium_namespace.yaml new file mode 100644 index 00000000..cc210b1e --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/00_cilium_namespace.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Namespace +metadata: + annotations: + openshift.io/node-selector: '' + labels: + name: cilium + openshift.io/cluster-logging: 'true' + openshift.io/cluster-monitoring: 'true' + openshift.io/run-level: '0' + name: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrole.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrole.yaml new file mode 100644 index 00000000..c5a71720 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrole.yaml @@ -0,0 +1,104 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/part-of: cilium + name: cilium +rules: + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get + - apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - 
ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + verbs: + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create + - apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get + - apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + verbs: + - patch diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrolebinding.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrolebinding.yaml new file mode 100644 index 00000000..b6cf96eb --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/part-of: cilium + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: + - kind: ServiceAccount + name: cilium + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/daemonset.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/daemonset.yaml new file mode 100644 index 00000000..7aba7980 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/daemonset.yaml @@ -0,0 +1,410 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + k8s-app: cilium + name: cilium + namespace: cilium +spec: + selector: + matchLabels: + k8s-app: cilium + template: + metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined + container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined + container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined + container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined + labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + k8s-app: cilium + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + 
imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - bash + - -c + - | + set -o errexit + set -o pipefail + set -o nounset + + # When running in AWS ENI mode, it's likely that 'aws-node' has + # had a chance to install SNAT iptables rules. These can result + # in dropped traffic, so we should attempt to remove them. + # We do it using a 'postStart' hook since this may need to run + # for nodes which might have already been init'ed but may still + # have dangling rules. This is safe because there are no + # dependencies on anything that is part of the startup script + # itself, and can be safely run multiple times per node (e.g. in + # case of a restart). + if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]]; + then + echo 'Deleting iptables rules created by the AWS CNI VPC plugin' + iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore + fi + echo 'Done!' + preStop: + exec: + command: + - /cni-uninstall.sh + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: 'true' + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: + - containerPort: 4244 + hostPort: 4244 + name: peer-service + protocol: TCP + - containerPort: 9962 + hostPort: 9962 + name: prometheus + protocol: TCP + - containerPort: 9964 + hostPort: 9964 + name: envoy-metrics + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: 'true' + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: 'true' + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /tmp + name: tmp + hostNetwork: true + initContainers: + - command: + - cilium + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + imagePullPolicy: IfNotPresent + name: config + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + 
env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /var/lib/cni/bin + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /var/lib/cni/bin + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.14.10@sha256:0a1bcd2859c6d18d60dba6650cca8c707101716a3e47b126679040cbd621c031 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + - emptyDir: {} + name: tmp + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: 
/proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /var/lib/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /var/run/multus/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: clustermesh-apiserver-remote-cert + optional: true + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/role.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/role.yaml new file mode 100644 index 00000000..6469cd59 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/role.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/part-of: cilium + name: cilium-config-agent + namespace: cilium +rules: + - apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/rolebinding.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/rolebinding.yaml new file mode 100644 index 00000000..1d47a92c --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/rolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/part-of: cilium + name: cilium-config-agent + namespace: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: + - kind: ServiceAccount + name: cilium + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/service.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/service.yaml new file mode 100644 index 00000000..fc9fa1ab --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + k8s-app: cilium + name: cilium-agent + namespace: cilium +spec: + clusterIP: None + ports: + - name: metrics + port: 9962 + protocol: TCP + targetPort: prometheus + - name: envoy-metrics + port: 9964 + protocol: TCP + targetPort: envoy-metrics + selector: + k8s-app: cilium + type: ClusterIP diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/serviceaccount.yaml 
b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/serviceaccount.yaml new file mode 100644 index 00000000..cd8696cf --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/servicemonitor.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/servicemonitor.yaml new file mode 100644 index 00000000..dde3d653 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-agent/servicemonitor.yaml @@ -0,0 +1,27 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + annotations: null + labels: + app.kubernetes.io/part-of: cilium + name: cilium-agent + namespace: cilium +spec: + endpoints: + - honorLabels: true + interval: 10s + path: /metrics + port: metrics + relabelings: + - replacement: ${1} + sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + namespaceSelector: + matchNames: + - cilium + selector: + matchLabels: + k8s-app: cilium + targetLabels: + - k8s-app diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-configmap.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-configmap.yaml new file mode 100644 index 00000000..275b5c1c --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-configmap.yaml @@ -0,0 +1,110 @@ +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: 'false' + bpf-lb-external-clusterip: 'false' + bpf-lb-map-max: '65536' + bpf-lb-sock: 'true' + bpf-map-dynamic-size-ratio: '0.0025' + bpf-policy-map-max: '16384' + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-id: '0' + cluster-name: default + cluster-pool-ipv4-cidr: 10.128.0.0/14 + cluster-pool-ipv4-mask-size: '23' + cni-exclusive: 'true' + cni-log-file: /var/run/cilium/cilium-cni.log + cnp-node-status-gc-interval: 0s + custom-cni-conf: 'false' + debug: 'false' + debug-verbose: '' + direct-routing-device: ens+ + disable-cnp-status-updates: 'true' + dnsproxy-enable-transparent-mode: 'true' + egress-gateway-reconciliation-trigger-interval: 1s + enable-auto-protect-node-port-range: 'true' + enable-bgp-control-plane: 'false' + enable-bpf-clock-probe: 'false' + enable-bpf-masquerade: 'false' + enable-endpoint-health-checking: 'true' + enable-endpoint-routes: 'true' + enable-health-check-nodeport: 'false' + enable-health-checking: 'true' + enable-hubble: 'true' + enable-ipv4: 'true' + enable-ipv4-big-tcp: 'false' + enable-ipv4-masquerade: 'true' + enable-ipv6: 'false' + enable-ipv6-big-tcp: 'false' + enable-ipv6-masquerade: 'true' + enable-k8s-networkpolicy: 'true' + enable-k8s-terminating-endpoint: 'true' + enable-l2-neigh-discovery: 'true' + enable-l7-proxy: 'true' + enable-local-redirect-policy: 'false' + enable-policy: default + enable-remote-node-identity: 'true' + enable-sctp: 'false' + enable-session-affinity: 'true' + enable-svc-source-range-check: 'true' + enable-vtep: 'false' + 
enable-well-known-identities: 'false' + enable-xt-socket-fallback: 'true' + external-envoy-proxy: 'false' + hubble-disable-tls: 'true' + hubble-listen-address: :4244 + hubble-socket-path: /var/run/cilium/hubble.sock + identity-allocation-mode: crd + identity-gc-interval: 15m0s + identity-heartbeat-timeout: 30m0s + install-no-conntrack-iptables-rules: 'false' + ipam: cluster-pool + ipam-cilium-node-update-rate: 15s + k8s-client-burst: '10' + k8s-client-qps: '5' + kube-proxy-replacement: 'true' + kube-proxy-replacement-healthz-bind-address: '' + mesh-auth-enabled: 'true' + mesh-auth-gc-interval: 5m0s + mesh-auth-queue-size: '1024' + mesh-auth-rotated-identities-queue-size: '1024' + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: 'true' + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + preallocate-bpf-maps: 'false' + procfs: /host/proc + prometheus-serve-addr: :9962 + proxy-connect-timeout: '2' + proxy-max-connection-duration-seconds: '0' + proxy-max-requests-per-connection: '0' + proxy-prometheus-port: '9964' + remove-cilium-node-taints: 'true' + routing-mode: tunnel + set-cilium-is-up-condition: 'true' + set-cilium-node-taints: 'true' + sidecar-istio-proxy-image: cilium/istio_proxy + skip-cnp-status-startup-clean: 'false' + synchronize-k8s-nodes: 'true' + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: 'true' + tofqdns-endpoint-max-ip-per-hostname: '50' + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: '10000' + tofqdns-proxy-response-max-delay: 100ms + tunnel-protocol: vxlan + unmanaged-pod-watcher-interval: '15' + vtep-cidr: '' + vtep-endpoint: '' + vtep-mac: '' + vtep-mask: '' + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist +kind: ConfigMap +metadata: + name: cilium-config + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrole.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrole.yaml new file mode 100644 index 00000000..9909428c --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrole.yaml @@ -0,0 +1,190 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/part-of: cilium + name: cilium-operator +rules: + - apiGroups: + - '' + resources: + - pods + verbs: + - get + - list + - watch + - delete + - apiGroups: + - '' + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + - nodes/status + verbs: + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services/status + verbs: + - update + - patch + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumclusterwidenetworkpolicies + verbs: + - create + - update + - deletecollection + - patch + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + verbs: + - patch + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpoints + 
- ciliumidentities + verbs: + - delete + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update + - apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + - delete + - apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update + - apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + verbs: + - create + - update + - get + - list + - watch + - delete + - patch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch + - apiGroups: + - apiextensions.k8s.io + resourceNames: + - ciliumloadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io + resources: + - customresourcedefinitions + verbs: + - update + - apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumpodippools + verbs: + - get + - list + - watch + - apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create + - apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrolebinding.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrolebinding.yaml new file mode 100644 index 00000000..fa15a03c --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/part-of: cilium + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: + - kind: ServiceAccount + name: cilium-operator + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/deployment.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/deployment.yaml new file mode 100644 index 00000000..bc21885b --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/deployment.yaml @@ -0,0 +1,108 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium + io.cilium/app: operator + name: cilium-operator + name: cilium-operator + namespace: cilium +spec: + replicas: 2 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 50% + type: RollingUpdate + template: + metadata: + annotations: null + 
labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium + io.cilium/app: operator + name: cilium-operator + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + command: + - cilium-operator-generic + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + image: quay.io/cilium/operator-generic:v1.14.10@sha256:415b7f0bb0e7339c6231d4b9ee74a6a513b2865acfccec884dbc806ecc3dd909 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + name: cilium-operator + readinessProbe: + failureThreshold: 5 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 3 + resources: + limits: + cpu: 100m + memory: 250Mi + requests: + cpu: 100m + memory: 250Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + restartPolicy: Always + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + volumes: + - configMap: + name: cilium-config + name: cilium-config-path diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/serviceaccount.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/serviceaccount.yaml new file mode 100644 index 00000000..242642c9 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-operator/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-secrets-namespace.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-secrets-namespace.yaml new file mode 100644 index 00000000..6d800eed --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/cilium-secrets-namespace.yaml @@ -0,0 +1,2 @@ +null +... 
diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/configmap.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/configmap.yaml new file mode 100644 index 00000000..7f549fea --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + config.yaml: "cluster-name: default\npeer-service: \"hubble-peer.cilium.svc.cluster.local:80\"\ + \nlisten-address: :4245\ngops: true\ngops-port: \"9893\"\ndial-timeout: \nretry-timeout:\ + \ \nsort-buffer-len-max: \nsort-buffer-drain-timeout: \ndisable-client-tls: true\n\ + disable-server-tls: true\n" +kind: ConfigMap +metadata: + name: hubble-relay-config + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/deployment.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/deployment.yaml new file mode 100644 index 00000000..5b9ed4f8 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/deployment.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium + k8s-app: hubble-relay + name: hubble-relay + namespace: cilium +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: hubble-relay + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: null + labels: + app.kubernetes.io/name: hubble-relay + app.kubernetes.io/part-of: cilium + k8s-app: hubble-relay + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: false + containers: + - args: + - serve + command: + - hubble-relay + image: quay.io/cilium/hubble-relay:v1.14.10@sha256:c156c4fc2da520d2876142ea17490440b95431a1be755d2050e72115a495cfd0 + imagePullPolicy: IfNotPresent + livenessProbe: + tcpSocket: + port: grpc + name: hubble-relay + ports: + - containerPort: 4245 + name: grpc + readinessProbe: + tcpSocket: + port: grpc + securityContext: + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hubble-relay + name: config + readOnly: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: null + restartPolicy: Always + securityContext: + fsGroup: 65532 + serviceAccount: hubble-relay + serviceAccountName: hubble-relay + terminationGracePeriodSeconds: 1 + volumes: + - configMap: + items: + - key: config.yaml + path: config.yaml + name: hubble-relay-config + name: config diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/service.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/service.yaml new file mode 100644 index 00000000..da8621dc --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: hubble-relay + 
app.kubernetes.io/part-of: cilium + k8s-app: hubble-relay + name: hubble-relay + namespace: cilium +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 4245 + selector: + k8s-app: hubble-relay + type: ClusterIP diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/serviceaccount.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/serviceaccount.yaml new file mode 100644 index 00000000..3d0be37c --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble-relay/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hubble-relay + namespace: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble/peer-service.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble/peer-service.yaml new file mode 100644 index 00000000..e492c74f --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/01_cilium_helmchart/cilium/templates/hubble/peer-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: hubble-peer + app.kubernetes.io/part-of: cilium + k8s-app: cilium + name: hubble-peer + namespace: cilium +spec: + internalTrafficPolicy: Local + ports: + - name: peer-service + port: 80 + protocol: TCP + targetPort: 4244 + selector: + k8s-app: cilium diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/02_aggregated_clusterroles.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/02_aggregated_clusterroles.yaml new file mode 100644 index 00000000..652b0ab1 --- /dev/null +++ b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/02_aggregated_clusterroles.yaml @@ -0,0 +1,67 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + name: syn-cilium-view + rbac.authorization.k8s.io/aggregate-to-admin: 'true' + rbac.authorization.k8s.io/aggregate-to-edit: 'true' + rbac.authorization.k8s.io/aggregate-to-view: 'true' + name: syn-cilium-view +rules: + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumendpoints + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + name: syn-cilium-edit + rbac.authorization.k8s.io/aggregate-to-admin: 'true' + rbac.authorization.k8s.io/aggregate-to-edit: 'true' + name: syn-cilium-edit +rules: + - apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + verbs: + - create + - delete + - deletecollection + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + name: syn-cilium-cluster-reader + rbac.authorization.k8s.io/aggregate-to-cluster-reader: 'true' + name: syn-cilium-cluster-reader +rules: + - apiGroups: + - cilium.io + resources: + - '*' + verbs: + - get + - list + - watch + - apiGroups: + - isovalent.com + resources: + - '*' + verbs: + - get + - list + - watch diff --git a/tests/golden/kubeproxyreplacement-strict/cilium/cilium/99_networkoperator_kube_proxy_patch.yaml b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/99_networkoperator_kube_proxy_patch.yaml new file mode 100644 index 00000000..855cbb69 --- /dev/null +++ 
b/tests/golden/kubeproxyreplacement-strict/cilium/cilium/99_networkoperator_kube_proxy_patch.yaml
@@ -0,0 +1,22 @@
+apiVersion: redhatcop.redhat.io/v1alpha1
+kind: Patch
+metadata:
+  annotations:
+    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+  labels:
+    name: network-cluster-72f42451a05fa70
+  name: network-cluster-72f42451a05fa70
+  namespace: syn-patch-operator
+spec:
+  patches:
+    network-cluster-72f42451a05fa70-patch:
+      patchTemplate: |-
+        "spec":
+          "deployKubeProxy": false
+      patchType: application/merge-patch+json
+      targetObjectRef:
+        apiVersion: operator.openshift.io/v1
+        kind: Network
+        name: cluster
+  serviceAccountRef:
+    name: patch-sa
diff --git a/tests/kubeproxyreplacement-strict.yml b/tests/kubeproxyreplacement-strict.yml
new file mode 100644
index 00000000..2fb431d0
--- /dev/null
+++ b/tests/kubeproxyreplacement-strict.yml
@@ -0,0 +1,18 @@
+parameters:
+  kapitan:
+    dependencies:
+      - type: https
+        source: https://raw.githubusercontent.com/projectsyn/component-patch-operator/v1.2.1/lib/patch-operator.libsonnet
+        output_path: vendor/lib/patch-operator.libsonnet
+
+  facts:
+    distribution: openshift4
+
+  patch_operator:
+    patch_serviceaccount:
+      name: patch-sa
+      namespace: syn-patch-operator
+
+  cilium:
+    cilium_helm_values:
+      kubeProxyReplacement: "true"

From 3b1cb7598cfadb61324bc37c2111dddb1e0654be Mon Sep 17 00:00:00 2001
From: Simon Gerber
Date: Tue, 9 Jul 2024 10:19:01 +0200
Subject: [PATCH 4/5] Update documentation

---
 docs/modules/ROOT/pages/references/parameters.adoc | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docs/modules/ROOT/pages/references/parameters.adoc b/docs/modules/ROOT/pages/references/parameters.adoc
index fd6d9be6..34bfeac5 100644
--- a/docs/modules/ROOT/pages/references/parameters.adoc
+++ b/docs/modules/ROOT/pages/references/parameters.adoc
@@ -175,6 +175,13 @@ See https://docs.cilium.io/en/{helm-minor-version}/helm-reference/[Opensource Ci
 
 The component will pre-process certain Helm values to allow users to more gracefully upgrade to newer Cilium versions which remove deprecated Helm values.
 
+[NOTE]
+====
+On OpenShift 4, the component will deploy a Patch resource which controls whether OpenShift deploys kube-proxy, based on the value of `cilium_helm_values.kubeProxyReplacement`.
+If the `kubeProxyReplacement` Helm value is set to `true` or `strict`, the component will configure OpenShift to not deploy kube-proxy.
+Otherwise, the component will configure OpenShift to deploy kube-proxy.
+====
+
 == `hubble_enterprise_helm_values`
 
 [horizontal]

From c480422ab901a3121192e1b410cf16f408e12de1 Mon Sep 17 00:00:00 2001
From: Simon Gerber
Date: Tue, 9 Jul 2024 10:19:24 +0200
Subject: [PATCH 5/5] Remove unused test configurations

---
 tests/helm-enterprise-openshift.yml | 7 -------
 tests/helm-enterprise.yml           | 4 ----
 2 files changed, 11 deletions(-)
 delete mode 100644 tests/helm-enterprise-openshift.yml
 delete mode 100644 tests/helm-enterprise.yml

diff --git a/tests/helm-enterprise-openshift.yml b/tests/helm-enterprise-openshift.yml
deleted file mode 100644
index 89ad659d..00000000
--- a/tests/helm-enterprise-openshift.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-parameters:
-  cilium:
-    install_method: helm
-    release: enterprise
-
-  facts:
-    distribution: openshift4
diff --git a/tests/helm-enterprise.yml b/tests/helm-enterprise.yml
deleted file mode 100644
index f87f519e..00000000
--- a/tests/helm-enterprise.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-parameters:
-  cilium:
-    install_method: helm
-    release: enterprise
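
For reference, a minimal configuration sketch that opts an OpenShift 4 cluster
into full kube-proxy replacement, and thereby into the generated Patch that
stops OpenShift from deploying its own kube-proxy. It mirrors the new
`tests/kubeproxyreplacement-strict.yml` fixture above; anything beyond these
keys (for example where in the hierarchy the snippet lives) is illustrative:

    parameters:
      facts:
        distribution: openshift4
      cilium:
        cilium_helm_values:
          kubeProxyReplacement: "true"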