name: Kind

# Any change in triggers needs to be reflected in the concurrency group.
on:
  pull_request: {}
  schedule:
    - cron: '30 */2 * * *'

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || 'scheduled' }}
  cancel-in-progress: true
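
# With the triggers above, a pull request run falls into a group such as
# "Kind-<PR number>", while a scheduled run falls into "Kind-scheduled", so
# the two trigger kinds never cancel each other's runs.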

env:
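  # Renovate uses the "renovate:" markers below to locate and automatically
  # bump the pinned versions that follow them.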
  # renovate: datasource=github-releases depName=kubernetes-sigs/kind
  kind_version: v0.25.0
  kind_config: .github/kind-config.yaml
  TIMEOUT: 2m
  LOG_TIME: 30m
  # renovate: datasource=github-releases depName=cilium/cilium
  cilium_version: v1.16.4
  kubectl_version: v1.23.6

jobs:
  installation-and-connectivity:
    name: Kind Installation and Connectivity Test
    runs-on: ubuntu-24.04
    timeout-minutes: 50
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Install kubectl
        run: |
curl -sLO "https://dl.k8s.io/release/${{ env.kubectl_version }}/bin/linux/amd64/kubectl" | |
curl -sLO "https://dl.k8s.io/${{ env.kubectl_version }}/bin/linux/amd64/kubectl.sha256" | |
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check | |
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl | |
kubectl version --client | |
      - name: Set up Go
        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
        with:
          # renovate: datasource=golang-version depName=go
          go-version: 1.23.3
      - name: Install Cilium CLI
        uses: ./
        with:
          local-path: '.'
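      # The step above runs the action from the repository root ("uses: ./"),
      # with local-path presumably pointing it at the checked-out source, so
      # the workflow exercises the local code rather than a released tag.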
      - name: Create kind cluster
        uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0
        with:
          version: ${{ env.kind_version }}
          config: ${{ env.kind_config }}
          wait: 0 # The control-plane never becomes ready, since no CNI is present
      # Install Cilium with HostPort support (portmap CNI chaining) and enable
      # Prometheus for the extended connectivity test.
      - name: Install Cilium
        run: |
          cilium install \
            --version=${{ env.cilium_version }} \
            --nodes-without-cilium \
            --wait=false \
            --set bpf.monitorAggregation=none \
            --set cni.chainingMode=portmap \
            --set loadBalancer.l7.backend=envoy \
            --set tls.secretsBackend=k8s \
            --set prometheus.enabled=true \
            --set localRedirectPolicy=true \
            --set socketLB.enabled=true
      - name: Enable Relay
        run: |
          cilium hubble enable --ui
          cilium status --wait --interactive=false
      - name: Relay Port Forward
        run: |
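          # Port-forward Hubble Relay in the background, then verify that it is
          # reachable on its default local port (4245).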
          cilium hubble port-forward&
          sleep 10s
          nc -nvz 127.0.0.1 4245
      - name: Set up external targets
        id: external_targets
        run: |
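          # chart-testing-worker2/worker3 (presumably the nodes excluded from
          # Cilium via --nodes-without-cilium above) host an nginx DaemonSet
          # that later serves as the external target of the connectivity tests.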
          export worker2_ip=$(kubectl get nodes chart-testing-worker2 -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
          export worker3_ip=$(kubectl get nodes chart-testing-worker3 -o=jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
          echo "worker2_ip=${worker2_ip}" >> $GITHUB_OUTPUT
          echo "worker3_ip=${worker3_ip}" >> $GITHUB_OUTPUT
          kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.0/cert-manager.yaml
          kubectl rollout status -n cert-manager deployment.apps/cert-manager
          kubectl rollout status -n cert-manager deployment.apps/cert-manager-webhook
          kubectl create ns external-targets
          cat .github/external-targets/certs.yaml | envsubst | kubectl apply -n external-targets -f -
          until kubectl get secret -n external-targets external-target-cert &> /dev/null; do sleep 1s; done
          kubectl apply -n external-targets -f .github/external-targets/nginx.yaml
          kubectl rollout status -n external-targets ds/nginx
      - name: Set up node local DNS
        run: |
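          # The node-local-dns manifest ships with __PILLAR__DNS__SERVER__
          # placeholders; substitute the kube-dns service ClusterIP before applying.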
          kubedns=$(kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP})
          sed -i "s/__PILLAR__DNS__SERVER__/$kubedns/g;" .github/node-local-dns/node-local-dns.yaml
          kubectl apply -k .github/node-local-dns
          kubectl rollout status -n kube-system ds/node-local-dns
      - name: Connectivity Test
        run: |
          # Set up the connectivity disruption tests. We don't really care about
          # the result here (in the sense that we don't perform any operation
          # which might cause a disruption), but we want to make sure that the
          # command works as expected.
          #
          # The dispatch interval is set to 100ms because otherwise (the default
          # is 0) the flow validation might time out.
          cilium connectivity test --test-namespace test-namespace \
            --conn-disrupt-dispatch-interval 100ms \
            --include-conn-disrupt-test --conn-disrupt-test-setup
          # Run the connectivity test in a non-default namespace (i.e., not cilium-test).
          cilium connectivity test --all-flows --test-namespace test-namespace \
            --test-concurrency=5 \
            --include-unsafe-tests --include-conn-disrupt-test \
            --collect-sysdump-on-failure --junit-file cilium-junit-1.xml \
            --junit-property type=no-tunnel \
            --curl-insecure \
            --external-target chart-testing-worker2 \
            --external-target-ca-namespace=external-targets \
            --external-target-ca-name=ca \
            --external-cidr 172.18.0.0/16 \
            --external-ip ${{ steps.external_targets.outputs.worker2_ip }} \
            --external-other-ip ${{ steps.external_targets.outputs.worker3_ip }}
      - name: Uninstall node local DNS
        run: |
          kubectl delete -k .github/node-local-dns
      - name: Uninstall cilium
        run: |
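          # Stop the background Hubble port-forward first; pkill exits 1 when no
          # process matches, which is fine if the port-forward already terminated.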
          pkill -f "cilium.*hubble.*port-forward|kubectl.*port-forward.*hubble-relay" || test $? -eq 1
          cilium uninstall --wait
      - name: Install Cilium with IPsec Encryption
        run: |
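          # The IPsec secret holds a key string of the form
          # "<SPI> <algorithm> <key> <ICV length>": here SPI 3, AES-GCM
          # (rfc4106), a random 20-byte hex-encoded key, and a 128-bit ICV.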
          kubectl create -n kube-system secret generic cilium-ipsec-keys \
            --from-literal=keys="3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/urandom count=20 bs=1 2> /dev/null | xxd -p -c 64)) 128"
          cilium install \
            --version=${{ env.cilium_version }} \
            --nodes-without-cilium \
            --set encryption.enabled=true \
            --set encryption.type=ipsec \
            --set kubeProxyReplacement=false
      - name: Enable Relay
        run: |
          cilium hubble enable
          cilium status --wait --interactive=false
      - name: Relay Port Forward
        run: |
          cilium hubble port-forward&
          sleep 10s
          nc -nvz 127.0.0.1 4245
      - name: Connectivity test
        run: |
          cilium connectivity test --force-deploy --all-flows --test-namespace test-namespace \
            --test-concurrency=5 \
            --include-unsafe-tests \
            --collect-sysdump-on-failure --junit-file cilium-junit-2.xml \
            --junit-property type=ipsec \
            --curl-insecure \
            --external-target chart-testing-worker2 \
            --external-target-ca-namespace=external-targets \
            --external-target-ca-name=ca \
            --external-cidr 172.18.0.0/16 \
            --external-ip ${{ steps.external_targets.outputs.worker2_ip }} \
            --external-other-ip ${{ steps.external_targets.outputs.worker3_ip }}
      - name: Upload JUnit
        if: ${{ always() }}
        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
        with:
          name: cilium-junits
          path: cilium-junit*.xml
          retention-days: 2
      - name: Cleanup
        if: ${{ always() }}
        run: |
          cilium status
          kubectl get pods --all-namespaces -o wide
          cilium sysdump --output-filename cilium-sysdump-out --hubble-flows-count 10000
        shell: bash {0} # Disable default fail-fast behaviour so that all commands run independently
      - name: Upload sysdump
        if: ${{ !success() }}
        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
        with:
          name: cilium-sysdumps
          path: cilium-sysdump-*.zip
          retention-days: 5

  helm-upgrade-clustermesh:
    name: Kind Helm Upgrade Clustermesh
    runs-on: ubuntu-24.04
    timeout-minutes: 50
    env:
      kind_config_1: .github/kind-config-1.yaml
      kind_config_2: .github/kind-config-2.yaml
      # helm/kind-action will override the "name:" provided in the kind config
      # with "chart-testing" unless these are specified as inputs. They must
      # also match the suffixes of CLUSTER1 and CLUSTER2 below.
      CLUSTER_NAME_1: c-1
      CLUSTER_NAME_2: c-2
      CLUSTER1: kind-c-1
      CLUSTER2: kind-c-2
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Install kubectl
        run: |
          curl -sLO "https://dl.k8s.io/release/${{ env.kubectl_version }}/bin/linux/amd64/kubectl"
          curl -sLO "https://dl.k8s.io/release/${{ env.kubectl_version }}/bin/linux/amd64/kubectl.sha256"
          echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
          sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
          kubectl version --client
      - name: Set up Go
        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
        with:
          # renovate: datasource=golang-version depName=go
          go-version: 1.23.3
      - name: Install Cilium CLI
        uses: ./
        with:
          local-path: '.'
      - name: Create kind cluster 1
        uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0
        with:
          version: ${{ env.kind_version }}
          config: ${{ env.kind_config_1 }}
          cluster_name: ${{ env.CLUSTER_NAME_1 }}
          wait: 0 # The control-plane never becomes ready, since no CNI is present
      - name: Install Cilium on cluster 1
        run: |
          cilium install --context $CLUSTER1 \
            --version=${{ env.cilium_version }} \
            --wait=true \
            --set bpf.monitorAggregation=none \
            --set cni.chainingMode=portmap \
            --set cluster.id=1 \
            --set cluster.name=$CLUSTER1 \
            --nodes-without-cilium
      - name: Create kind cluster 2
        uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0
        with:
          version: ${{ env.kind_version }}
          config: ${{ env.kind_config_2 }}
          cluster_name: ${{ env.CLUSTER_NAME_2 }}
          wait: 0 # The control-plane never becomes ready, since no CNI is present
      - name: Install Cilium on cluster 2
        run: |
          cilium install --context $CLUSTER2 \
            --version=${{ env.cilium_version }} \
            --wait=true \
            --set bpf.monitorAggregation=none \
            --set cni.chainingMode=portmap \
            --set cluster.id=2 \
            --set cluster.name=$CLUSTER2
      - name: Enable ClusterMesh on cluster 1 using helm-based upgrade
        run: |
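          # Expose the clustermesh-apiserver via NodePort and add the
          # <cluster>.mesh.cilium.io names to its server certificate, since the
          # peer cluster is pointed at it through those hostnames.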
          cilium upgrade --reuse-values --context $CLUSTER1 \
            --wait=true \
            --set clustermesh.useAPIServer=true \
            --set clustermesh.apiserver.service.type=NodePort \
            --set clustermesh.apiserver.tls.server.extraDnsNames={"$CLUSTER1.mesh.cilium.io,$CLUSTER2.mesh.cilium.io"}
      - name: Copy CA cert from cluster 1 to cluster 2
        run: |
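          # Both clusters need to share the same cilium-ca so that certificates
          # issued in one cluster validate in the other; replace cluster 2's CA
          # with cluster 1's copy.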
          kubectl --context $CLUSTER2 delete secret -n kube-system cilium-ca && \
          kubectl --context $CLUSTER1 get secrets -n kube-system cilium-ca -oyaml \
            | kubectl --context $CLUSTER2 apply -f -
          # Restart Cilium on cluster 2 to pick up the new CA.
          kubectl --context $CLUSTER2 delete pod -l app.kubernetes.io/part-of=cilium -A
      - name: Enable ClusterMesh on cluster 2 using helm-based upgrade
        run: |
          cilium upgrade --reuse-values --context $CLUSTER2 \
            --wait=true \
            --set clustermesh.useAPIServer=true \
            --set clustermesh.apiserver.service.type=NodePort \
            --set clustermesh.apiserver.tls.server.extraDnsNames={"$CLUSTER1.mesh.cilium.io,$CLUSTER2.mesh.cilium.io"}
      - name: Rename the secrets expected by the clustermesh connect command
        run: |
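          # The helm-managed certificates are named clustermesh-apiserver-remote-cert,
          # but "cilium clustermesh connect" expects clustermesh-apiserver-client-cert,
          # so re-apply each secret under the expected name.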
          kubectl get secrets --context $CLUSTER1 \
            -n kube-system clustermesh-apiserver-remote-cert -oyaml \
            | sed 's/name: .*/name: clustermesh-apiserver-client-cert/' \
            | kubectl apply --context $CLUSTER1 -f -
          kubectl get secrets --context $CLUSTER2 \
            -n kube-system clustermesh-apiserver-remote-cert -oyaml \
            | sed 's/name: .*/name: clustermesh-apiserver-client-cert/' \
            | kubectl apply --context $CLUSTER2 -f -
      - name: Connect the two clusters using clustermesh
        run: |
          cilium clustermesh connect --context $CLUSTER1 --destination-context $CLUSTER2
          cilium clustermesh status --context $CLUSTER1 --wait
      - name: Run the multicluster connectivity tests
        run: |
          # Remove unused docker stuff to avoid running out of disk space.
          docker system prune -fa
          # Set up the connectivity disruption tests. We don't really care about
          # the result here (in the sense that we don't perform any operation
          # which might cause a disruption), but we want to make sure that the
          # command works as expected.
          #
          # The dispatch interval is set to 100ms because otherwise (the default
          # is 0) the flow validation might time out.
          cilium connectivity test --context $CLUSTER1 --multi-cluster $CLUSTER2 \
            --conn-disrupt-dispatch-interval 100ms \
            --include-conn-disrupt-test --conn-disrupt-test-setup
          cilium connectivity test --context $CLUSTER1 --multi-cluster $CLUSTER2 \
            --include-unsafe-tests --include-conn-disrupt-test \
            --collect-sysdump-on-failure --junit-file cilium-junit-clustermesh-1.xml \
            --junit-property mode=clustermesh --junit-property type=ipsec
      - name: Upload JUnit
        if: ${{ always() }}
        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
        with:
          name: cilium-junits-helm-upgrade-clustermesh
          path: cilium-junit*.xml
          retention-days: 2
      - name: Cleanup
        if: ${{ always() }}
        run: |
          cilium --context $CLUSTER1 status
          kubectl --context $CLUSTER1 get pods --all-namespaces -o wide
          cilium --context $CLUSTER1 sysdump --output-filename cilium-sysdump-out-c1
          cilium --context $CLUSTER2 status
          kubectl --context $CLUSTER2 get pods --all-namespaces -o wide
          cilium --context $CLUSTER2 sysdump --output-filename cilium-sysdump-out-c2
        shell: bash {0} # Disable default fail-fast behaviour so that all commands run independently
      - name: Upload sysdump from cluster 1
        if: ${{ !success() }}
        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
        with:
          name: cilium-sysdump-out-c1.zip
          path: cilium-sysdump-out-c1.zip
          retention-days: 5
      - name: Upload sysdump from cluster 2
        if: ${{ !success() }}
        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
        with:
          name: cilium-sysdump-out-c2.zip
          path: cilium-sysdump-out-c2.zip
          retention-days: 5