cilium install
# (assuming the aliases k = kubectl and ks = kubectl -n kube-system)
ks edit cm cilium-config
# Set/add these keys:
# allow-localhost: "policy"
# enable-host-firewall: "true"
# enable-policy: "always"
# policy-audit-mode: "true"
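# Non-interactive alternative to editing by hand (same keys as above, applied with kubectl patch):
ks patch cm cilium-config --type merge -p '{"data":{"allow-localhost":"policy","enable-host-firewall":"true","enable-policy":"always","policy-audit-mode":"true"}}'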
# restart with new config
ks delete pods -l k8s-app=cilium
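# Alternatively, restart the DaemonSet and wait for it to come back:
ks rollout restart ds/cilium
ks rollout status ds/cilium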
# Or just enable Hubble, which restarts Cilium anyway
cilium hubble enable
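# Wait for the agents (and Hubble) to be ready again:
cilium status --wait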
k get nodes
# pick control plane node and set NODE_NAME=<name>
ks get pods -l "k8s-app=cilium" -o wide
# CILIUM_CP=<pod name on CP node>
# CILIUM_WORKER=<pod name on worker node>
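# A sketch to set these without copy/paste (assumes a kubeadm/kind-style control-plane label and the kind-worker node name used below):
NODE_NAME=$(k get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].metadata.name}')
CILIUM_CP=$(ks get pods -l k8s-app=cilium --field-selector spec.nodeName=$NODE_NAME -o jsonpath='{.items[0].metadata.name}')
CILIUM_WORKER=$(ks get pods -l k8s-app=cilium --field-selector spec.nodeName=kind-worker -o jsonpath='{.items[0].metadata.name}')
# Sanity check that the host firewall is active in the agent (exact output line may vary by version):
ks exec $CILIUM_CP -- cilium status | grep -i "host firewall"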
# Listing endpoints (set CILIUM_POD to either of the pods above)
ks exec $CILIUM_POD -- cilium endpoint list
ks exec $CILIUM_POD -- cilium endpoint get <ID>
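# The node itself shows up as the endpoint carrying the reserved:host label; a sketch to grab its ID (assumes jq is available locally and this JSON layout):
HOST_EP_ID=$(ks exec $CILIUM_POD -- cilium endpoint list -o json | jq '.[] | select(.status.identity.labels | index("reserved:host")) | .id')
ks exec $CILIUM_POD -- cilium endpoint get $HOST_EP_ID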
# Monitoring policy verdicts
ks exec $CILIUM_POD -- cilium monitor -t policy-verdict
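# If the output is noisy, the monitor can be narrowed to just the host endpoint (assumes your version has the --related-to flag; uses HOST_EP_ID from above):
ks exec $CILIUM_POD -- cilium monitor -t policy-verdict --related-to $HOST_EP_ID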
# Observe flows that policy would have dropped but audit mode let through
cilium hubble port-forward &
hubble observe --verdict AUDIT
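# A sketch to narrow the audit stream to the web traffic generated below, using standard observe filters:
hubble observe --verdict AUDIT --to-port 80 --follow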
# curl.yaml: a pod in the host network namespace that just sleeps so we can exec into it
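# A possible curl.yaml (a sketch; the image and sleep command are assumptions, hostNetwork is the part that matters):
#   apiVersion: v1
#   kind: Pod
#   metadata:
#     name: curl
#   spec:
#     hostNetwork: true
#     containers:
#     - name: curl
#       image: nicolaka/netshoot
#       command: ["sleep", "86400"]
k apply -f curl.yaml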
k exec -it curl -- sh
# Check which node the curl pod landed on (k get pod curl -o wide) and find that node's host endpoint in the endpoint list
# Inside the pod, resolve a name to get an IP address we can grep for
nslookup example.com
# Back on the host, watch policy verdicts on that node's agent and grep for the resolved IP
ks exec $CILIUM_WORKER -- cilium monitor -t policy-verdict | grep <IP address>
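# From inside the curl pod, generate some traffic to produce verdicts (assumes curl is present in the image):
curl -sS http://example.com >/dev/null
# The flow should appear both in the monitor grep above and in hubble observe --verdict AUDIT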
# Label the node; the label is propagated to that node's host endpoint, so it shows up in the endpoint list
k label node kind-worker policy=example
# Apply ccnp-example.yaml policy to allow port 80 egress traffic from labelled node
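# A sketch of what ccnp-example.yaml might contain: a CiliumClusterwideNetworkPolicy whose nodeSelector matches the label above and allows egress to port 80 anywhere (the policy name is an assumption):
#   apiVersion: cilium.io/v2
#   kind: CiliumClusterwideNetworkPolicy
#   metadata:
#     name: ccnp-example
#   spec:
#     nodeSelector:
#       matchLabels:
#         policy: example
#     egress:
#     - toPorts:
#       - ports:
#         - port: "80"
#           protocol: TCP
k apply -f ccnp-example.yaml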