diff --git a/.github/workflows/check-links.yaml b/.github/workflows/check-links.yaml index 74f5078bf..59ee77c25 100644 --- a/.github/workflows/check-links.yaml +++ b/.github/workflows/check-links.yaml @@ -11,17 +11,17 @@ jobs: linkChecker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - name: Check unrendered links id: lychee_unrendered - uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # v1.10.0 + uses: lycheeverse/lychee-action@2bb232618be239862e31382c5c0eaeba12e5e966 # v2.0.1 env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} with: fail: true debug: false - args: --no-progress --include-fragments --github-token ${{secrets.GITHUB_TOKEN}} -c lychee.toml -E content/ + args: --no-progress --include-fragments --github-token ${{secrets.GITHUB_TOKEN}} --config config/lychee.toml -E content/ # Deactivated. The --include-fragments flag is causing failures because rendered links # have a trailing '#' which is probably a result of the link style change plus the new @@ -48,7 +48,7 @@ jobs: # - name: Check rendered links # id: lychee_rendered - # uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # v1.10.0 + # uses: lycheeverse/lychee-action@2bb232618be239862e31382c5c0eaeba12e5e966 # v2.0.1 # env: # GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} # with: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 968ee8f44..34344b9b8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -72,7 +72,7 @@ Kyverno maintains a thriving community with two different opportunities to parti #### Community Meetings -For the available Kyverno meetings, see [here](https://kyverno.io/community/#community-meetings). +For the available Kyverno meetings, see [here](https://kyverno.io/community/#meetings). ## Developer Certificate of Origin (DCO) Sign off diff --git a/OWNERS.md b/OWNERS.md new file mode 100644 index 000000000..49e30e334 --- /dev/null +++ b/OWNERS.md @@ -0,0 +1,15 @@ +approvers: +- JimBugwadia +- realshuting +- eddycharly +- fjogeleit +- MariamFahmy98 +- vishal-chdhry + +reviewers: +- JimBugwadia +- realshuting +- eddycharly +- MariamFahmy98 +- vishal-chdhry +- fjogeleit \ No newline at end of file diff --git a/README.md b/README.md index 3cadbb4c1..f6e693df5 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,15 @@ hugo server By default, Hugo runs the website at: http://localhost:1313 and will re-build the site on changes. +**Note for GitHub Codespaces users:** You will need to install the Hugo extended version. To do so, download the extended version from the [Hugo releases](https://github.com/gohugoio/hugo/releases) page for your operating system (usually Ubuntu for Codespaces).
Use the commands below to install it and move the hugo binary to `/usr/local/hugo/bin/hugo`: +``` +wget https://github.com/gohugoio/hugo/releases/download/v0.135.0/hugo_extended_0.135.0_linux-amd64.deb +sudo dpkg -i hugo_extended_0.135.0_linux-amd64.deb +rm hugo_extended_0.135.0_linux-amd64.deb +sudo mv /usr/local/bin/hugo /usr/local/hugo/bin/hugo +``` +Finally, check the Hugo version by running: `hugo version` + ## Update Docsy theme The project uses [Hugo Modules](https://gohugo.io/hugo-modules/) to manage the theme: diff --git a/assets/scss/_styles_project.scss b/assets/scss/_styles_project.scss index d926279cd..8673ac750 100644 --- a/assets/scss/_styles_project.scss +++ b/assets/scss/_styles_project.scss @@ -2,6 +2,14 @@ body { scroll-behavior: smooth; overscroll-behavior: none; } +a { + text-decoration: none; +} + +.alert { + max-width: 100% !important; +} + .td-navbar { min-height: auto; } @@ -509,6 +517,7 @@ code.noClass { overflow: initial; display: initial; margin: 0; + border: none; } .highlight table { @@ -897,4 +906,4 @@ code.noClass { .markmap > svg { width: 100%; height: 300px; -} \ No newline at end of file +} diff --git a/config/_default/menus/menu.en.toml b/config/_default/menus/menu.en.toml index 1fe1a7082..679edcdd5 100644 --- a/config/_default/menus/menu.en.toml +++ b/config/_default/menus/menu.en.toml @@ -3,7 +3,7 @@ [[main]] name = "About" weight = -103 - url = "#kyverno-is-a-policy-engine-designed-for-kubernetes" + url = "#about-kyverno" [[main]] name = "Documentation" @@ -15,11 +15,6 @@ weight = -101 url = "/policies" -[[main]] - name = "Resources" - weight = -99 - url = "/resources" - [[main]] name = "Playground" weight = -98 diff --git a/lychee.toml b/config/lychee.toml similarity index 100% rename from lychee.toml rename to config/lychee.toml diff --git a/content/en/_index.md b/content/en/_index.md index e57080094..8483e4f5c 100644 --- a/content/en/_index.md +++ b/content/en/_index.md @@ -4,17 +4,17 @@ linkTitle = "Kyverno" +++ {{< blocks/cover title="Kyverno" image_anchor="top" height="full" color="dark" >}} -# Kubernetes Native Policy Management { class="text-center" } +# Policy as Code, Simplified! { class="text-center" } <div class="mt-5 mx-auto">
- + Learn More Get Started - +
{{< /blocks/cover >}} @@ -23,18 +23,29 @@ linkTitle = "Kyverno" {{% blocks/lead color="light" %}}
-# Kyverno is a policy engine **designed for Kubernetes** { class="text-center" } - +# About Kyverno { class="text-center" } +

+ +

+The Kyverno project provides a comprehensive set of tools to manage the complete Policy-as-Code (PaC) lifecycle for Kubernetes and other cloud native environments. +


-Policies are managed as Kubernetes resources and **no new language is required** to write policies. +

+ +Kyverno policies are declarative YAML resources and no new language is required. Kyverno enables use of familiar tools such as kubectl, git, and kustomize to manage policies. Kyverno supports JMESPath and the Common Expression Language (CEL) for efficient handling of complex logic. -This allows using familiar tools such as kubectl, git, and kustomize to manage policies. +In Kubernetes environments, Kyverno policies can validate, mutate, generate, and cleanup any Kubernetes resource, including custom resources. To help secure the software supply chain, Kyverno policies can verify OCI container image signatures and artifacts. Kyverno policy reports and policy exceptions are also Kubernetes API resources. -Kyverno policies can **validate, mutate, generate, and cleanup** Kubernetes resources, and **verify image** signatures and artifacts to help secure the software supply chain. +The **Kyverno CLI** can be used to apply and test policies off-cluster, e.g., as part of IaC and CI/CD pipelines. -The Kyverno CLI can be used to test policies and validate resources as part of a **CI/CD pipeline**. +**Kyverno Policy Reporter** provides report management with a graphical web-based user interface. + +**Kyverno JSON** allows applying Kyverno policies in non-Kubernetes environments and on any JSON payload. + +**Kyverno Chainsaw** provides declarative end-to-end testing for policies. +
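To illustrate, here is a minimal validation policy (a sketch of the common require-labels pattern; the policy name and label are illustrative):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-team-label
spec:
  validationFailureAction: Enforce
  rules:
  - name: check-team
    match:
      any:
      - resources:
          kinds:
          - Pod
    validate:
      message: "The label `team` is required."
      pattern:
        metadata:
          labels:
            team: "?*"
```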

diff --git a/content/en/blog/general/assigning-node-metadata-to-pods/index.md b/content/en/blog/general/assigning-node-metadata-to-pods/index.md index 3dd7b9592..098b4ff69 100644 --- a/content/en/blog/general/assigning-node-metadata-to-pods/index.md +++ b/content/en/blog/general/assigning-node-metadata-to-pods/index.md @@ -390,6 +390,9 @@ spec: # https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone topology.kubernetes.io/zone: "{{ ZoneLabel }}" ``` +### Credits + +Thanks to [Abir Sigron](https://github.com/abirsigron) for initiating the idea on Slack and conducting a POC. ## Closing diff --git a/content/en/blog/general/introducing-kyverno-envoy-plugin/arch-pod.png b/content/en/blog/general/introducing-kyverno-envoy-plugin/arch-pod.png new file mode 100644 index 000000000..3adc1f6aa Binary files /dev/null and b/content/en/blog/general/introducing-kyverno-envoy-plugin/arch-pod.png differ diff --git a/content/en/blog/general/introducing-kyverno-envoy-plugin/arch-seprate-pod.png b/content/en/blog/general/introducing-kyverno-envoy-plugin/arch-seprate-pod.png new file mode 100644 index 000000000..ee7334c3e Binary files /dev/null and b/content/en/blog/general/introducing-kyverno-envoy-plugin/arch-seprate-pod.png differ diff --git a/content/en/blog/general/introducing-kyverno-envoy-plugin/index.md b/content/en/blog/general/introducing-kyverno-envoy-plugin/index.md new file mode 100644 index 000000000..ca1e555ce --- /dev/null +++ b/content/en/blog/general/introducing-kyverno-envoy-plugin/index.md @@ -0,0 +1,236 @@ +--- +date: 2024-06-04 +title: Kyverno-Envoy-Plugin - Kyverno policies based authorization plugin for Envoy +linkTitle: Kyverno-Envoy-Plugin - Kyverno policies based authorization plugin for Envoy +author: Sanskar Gurdasani +description: Make external authorization easy with Kyverno-Envoy-Plugin. +--- + +![Kyverno-Envoy-Plugin](logo.png) + +Microservices enhance the productivity of individual development teams by dividing applications into smaller, independent components. However, microservices alone do not address longstanding challenges in distributed systems such as authentication and authorization. These problems can become even harder to manage due to the diverse and short-lived nature of the microservice environments. + +As more organizations move to using microservices, there is an increasing need for separate authentication and authorization mechanisms that work across different microservices. + +In this blog post, we will introduce [Kyverno-Envoy-Plugin](https://github.com/kyverno/kyverno-envoy-plugin), how it works and how you can use this version of Kyverno to enforce fine-grained, context-aware access control policies with Envoy without modifying your microservice or application code. + +## What is Envoy + +[Envoy](https://www.envoyproxy.io/docs/envoy/latest/intro/what_is_envoy) is a Layer 7 proxy and communication bus tailored for large-scale, modern service-oriented architectures. Starting from version 1.7.0, Envoy includes an [External Authorization filter](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ext_authz_filter.html) that interfaces with an authorization service to check if the incoming request is authorized or not. This functionality allows authorization decisions to be offloaded to an external service, which can access the request context. The request context includes details such as the origin and destination of the network activity, as well as specifics of the network request (e.g., HTTP request). 
This information enables the external service to make a well-informed decision regarding the authorization of the incoming request processed by Envoy. + +## What is Kyverno-Envoy Plugin + +The [Kyverno-Envoy](https://github.com/kyverno/kyverno-envoy-plugin) plugin extends [Kyverno-JSON](https://kyverno.github.io/kyverno-json/latest/) with a gRPC server that implements the [Envoy External Authorization API](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ext_authz_filter.html). This allows you to enforce Kyverno policies on incoming and outgoing traffic in a service mesh environment, providing an additional layer of security and control over your applications. You can use this version of Kyverno to enforce fine-grained, context-aware access control policies with Envoy without modifying your microservice. + +## How does this work + +In addition to the Envoy sidecar, your application pods will include a Kyverno-Envoy-Plugin component, either as a sidecar or as a separate pod. Envoy will be configured to communicate with the Kyverno-Envoy-Plugin gRPC server. When Envoy receives an API request intended for your microservice, it consults the Kyverno-Envoy-Plugin server to determine whether the request should be permitted. + +Here is the architecture when the Kyverno-Envoy-Plugin is deployed as a sidecar to your application: + +![architecture-sidecar](./arch-pod.png) + +Here is the architecture when the Kyverno-Envoy-Plugin is deployed as a separate pod alongside your application: + +![architecture-pod](./arch-seprate-pod.png) + +Performing policy evaluations locally with Envoy is advantageous, as it eliminates the need for an additional network hop for authorization checks, thus enhancing both performance and availability. + +The Kyverno-Envoy-Plugin can be deployed with Envoy-based service meshes such as [Istio](https://istio.io/), [Gloo](https://gloo.solo.io/), and [Kuma](https://kuma.io/).
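Authorization checks are wired up through Envoy's `ext_authz` HTTP filter. As a rough sketch (field values are illustrative; the `envoy-config` ConfigMap in the quick start contains the authoritative configuration), the filter points Envoy at the plugin's gRPC server:

```yaml
http_filters:
- name: envoy.filters.http.ext_authz
  typed_config:
    "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
    transport_api_version: V3
    # Fail closed: deny requests when the authorization service is unreachable.
    failure_mode_allow: false
    grpc_service:
      google_grpc:
        # The Kyverno-Envoy-Plugin gRPC server listening on port 9000 in the same pod.
        target_uri: 127.0.0.1:9000
        stat_prefix: ext_authz
```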
+## Getting started + +In this blog, we will deploy the Kyverno-Envoy-Plugin as a sidecar container next to the application container. The plugin will handle authorizing incoming requests to the application. Additionally, [documentation](https://kyverno.github.io/kyverno-envoy-plugin/dev/) is provided for deploying the plugin as a separate pod. + +Before we can look at the Kyverno-Envoy-Plugin, we need a Kubernetes cluster. We can create a local cluster with [minikube](https://minikube.sigs.k8s.io/docs/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +### Create a local cluster + +Start a minikube cluster with the following command: + +```sh +minikube start +``` + +### Install Kyverno-Envoy-Plugin sidecar with application + +Install the application with Envoy and the Kyverno-Envoy-Plugin as sidecar containers. + +```sh +$ kubectl apply -f https://raw.githubusercontent.com/kyverno/kyverno-envoy-plugin/main/quick_start.yaml +``` + +The `quick_start.yaml` manifest defines the following resources: + +- The Deployment includes an example Go application that provides information about books in a library collection and exposes APIs to `get`, `create`, and `delete` books. See the [Go test application](https://github.com/Sanskarzz/kyverno-envoy-demos/tree/main/test-application) for more information. + +- The Deployment also includes a Kyverno-Envoy-Plugin sidecar container in addition to the Envoy sidecar container. When Envoy receives an API request destined for the Go test application, it checks with the Kyverno-Envoy-Plugin to decide whether the request should be allowed; the Kyverno-Envoy-Plugin sidecar container is configured to query the Kyverno-JSON engine for policy decisions on incoming requests. + +- A ConfigMap `policy-config` is used to pass the policy to the Kyverno-Envoy-Plugin sidecar in the namespace `default` where the application is deployed. + +- A ConfigMap `envoy-config` is used to pass an Envoy configuration with an External Authorization Filter to direct authorization checks to the Kyverno-Envoy-Plugin sidecar. + +- The Deployment also includes an init container that installs iptables rules to redirect all container traffic to the Envoy proxy sidecar container; more about the init container can be found [here](https://github.com/kyverno/kyverno-envoy-plugin/tree/main/demo/standalone-envoy/envoy_iptables). + +### Make the test application accessible in the cluster + +```console +kubectl expose deployment testapp --type=NodePort --name=testapp --port=8080 +``` + +### Set the `SERVICE_URL` environment variable to the service's IP/port + +minikube: + +```sh +export SERVICE_PORT=$(kubectl get service testapp -o jsonpath='{.spec.ports[?(@.port==8080)].nodePort}') +export SERVICE_HOST=$(minikube ip) +export SERVICE_URL=$SERVICE_HOST:$SERVICE_PORT +echo $SERVICE_URL +``` + +### Calling the sample test application and verifying the authorization + +For convenience, we’ll store Alice’s and Bob’s tokens in environment variables (not recommended for production). Here Bob is assigned the `admin` role and Alice is assigned the `guest` role. + +```bash +export ALICE_TOKEN="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIyNDEwODE1MzksIm5iZiI6MTUxNDg1MTEzOSwicm9sZSI6Imd1ZXN0Iiwic3ViIjoiWVd4cFkyVT0ifQ.ja1bgvIt47393ba_WbSBm35NrUhdxM4mOVQN8iXz8lk" +export BOB_TOKEN="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIyNDEwODE1MzksIm5iZiI6MTUxNDg1MTEzOSwicm9sZSI6ImFkbWluIiwic3ViIjoiWVd4cFkyVT0ifQ.veMeVDYlulTdieeX-jxFZ_tCmqQ_K8rwx2OktUHv5Z0" +``` + +The policy we passed to the Kyverno-Envoy-Plugin sidecar in the ConfigMap `policy-config` checks the conditions of the incoming request and denies it if the user is a guest and the request method is `POST` at the `/book` path. + +```yaml +apiVersion: json.kyverno.io/v1alpha1 +kind: ValidatingPolicy +metadata: + name: checkrequest +spec: + rules: + - name: deny-guest-request-at-post + assert: + any: + - message: "POST method calls at path /book are not allowed to guests users" + check: + request: + http: + method: POST + headers: + authorization: + (split(@, ' ')[1]): + (jwt_decode(@ , 'secret').payload.role): admin + path: /book + - message: "GET method call is allowed to both guest and admin users" + check: + request: + http: + method: GET + headers: + authorization: + (split(@, ' ')[1]): + (jwt_decode(@ , 'secret').payload.role): admin + path: /book + - message: "GET method call is allowed to both guest and admin users" + check: + request: + http: + method: GET + headers: + authorization: + (split(@, ' ')[1]): + (jwt_decode(@ , 'secret').payload.role): guest + path: /book +```
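Because the plugin delegates decisions to the Kyverno-JSON engine, the same policy can also be exercised off-cluster with the Kyverno-JSON CLI (a sketch under the assumption that the `scan` command and flags match the Kyverno-JSON docs; `payload.json` is a hypothetical file mirroring the `request.http` structure the plugin sends to the engine):

```bash
# payload.json (hypothetical) is shaped like the request the plugin evaluates:
# {"request": {"http": {"method": "POST", "path": "/book", "headers": {"authorization": "Bearer <JWT>"}}}}
kyverno-json scan --payload payload.json --policy policy.yaml
```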
Rowling"}' -H "Content-Type: application/json" -X POST http://$SERVICE_URL/book +``` + +Check the `Bob` which can get book also create the book + +```bash +curl -i -H "Authorization: Bearer "$BOB_TOKEN"" http://$SERVICE_URL/book +``` + +```bash +curl -i -H "Authorization: Bearer "$BOB_TOKEN"" -d '{"bookname":"Harry Potter", "author":"J.K. Rowling"}' -H "Content-Type: application/json" -X POST http://$SERVICE_URL/book +``` + +Check on logs + +```bash +kubectl logs "$(kubectl get pod -l app=testapp -o jsonpath={.items..metadata.name})" -c kyverno-envoy-plugin -f +``` + +First , third and last request is passed but second request is failed. + +```console +$ kubectl logs "$(kubectl get pod -l app=testapp -n demo -o jsonpath={.items..metadata.name})" -n demo -c kyverno-envoy-plugin -f +Starting HTTP server on Port 8000 +Starting GRPC server on Port 9000 +Request is initialized in kyvernojson engine . +2024/04/26 17:11:42 Request passed the deny-guest-request-at-post policy rule. +Request is initialized in kyvernojson engine . +2024/04/26 17:22:11 Request violation: -> POST method calls at path /book are not allowed to guests users + -> any[0].check.request.http.headers.authorization.(split(@, ' ')[1]).(jwt_decode(@ , 'secret').payload.role): Invalid value: "guest": Expected value: "admin" +-> GET method call is allowed to both guest and admin users + -> any[1].check.request.http.headers.authorization.(split(@, ' ')[1]).(jwt_decode(@ , 'secret').payload.role): Invalid value: "guest": Expected value: "admin" + -> any[1].check.request.http.method: Invalid value: "POST": Expected value: "GET" +-> GET method call is allowed to both guest and admin users + -> any[2].check.request.http.method: Invalid value: "POST": Expected value: "GET" +Request is initialized in kyvernojson engine . +2024/04/26 17:23:13 Request passed the deny-guest-request-at-post policy rule. +Request is initialized in kyvernojson engine . +2024/04/26 17:23:55 Request passed the deny-guest-request-at-post policy rule. +``` + +### Configuration + +To deploy Kyverno-Envoy-Plugin include the following container in your Kubernetes Deployments: + +```yaml +- name: kyverno-envoy-plugin + image: sanskardevops/plugin:0.0.34 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8181 + - containerPort: 9000 + volumeMounts: + - readOnly: true + args: + - "serve" + - "--policy=/policies/policy.yaml" + - "--address=:9000" + - "--healthaddress=:8181" + livenessProbe: + httpGet: + path: /health + scheme: HTTP + port: 8181 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /health + scheme: HTTP + port: 8181 + initialDelaySeconds: 5 + periodSeconds: 5 +``` + +## Conclusion + +This blog post demonstrates how the Kyverno-Envoy plugin can be effectively used to make external authorization decisions for incoming requests in a microservice architecture. By leveraging the power of Kyverno policies and Envoy's External Authorization filter, you can achieve fine-grained, context-aware access control without modifying your application code. This approach not only simplifies the management of security policies but also enhances the security posture of your services by ensuring that authorization checks are consistently applied across your microservices. + +The Kyverno-Envoy-Plugin provides a robust solution for organizations looking to enforce policy-driven access controls in their service meshes. 
+ +## Conclusion + +This blog post demonstrates how the Kyverno-Envoy plugin can be effectively used to make external authorization decisions for incoming requests in a microservice architecture. By leveraging the power of Kyverno policies and Envoy's External Authorization filter, you can achieve fine-grained, context-aware access control without modifying your application code. This approach not only simplifies the management of security policies but also enhances the security posture of your services by ensuring that authorization checks are consistently applied across your microservices. + +The Kyverno-Envoy-Plugin provides a robust solution for organizations looking to enforce policy-driven access controls in their service meshes. By following the steps outlined in this guide, you can easily deploy and configure the plugin, allowing you to take full advantage of Kyverno's policy capabilities in conjunction with Envoy's powerful proxy features. + +For further exploration: + +- 🔗 Check out the project on GitHub: https://github.com/kyverno/kyverno-envoy-plugin +- 📚 Browse the documentation: https://kyverno.github.io/kyverno-envoy-plugin diff --git a/content/en/blog/general/introducing-kyverno-envoy-plugin/logo.png b/content/en/blog/general/introducing-kyverno-envoy-plugin/logo.png new file mode 100644 index 000000000..f997af7cd Binary files /dev/null and b/content/en/blog/general/introducing-kyverno-envoy-plugin/logo.png differ diff --git a/content/en/blog/general/introducing-reports-server/index.md b/content/en/blog/general/introducing-reports-server/index.md index fa073b312..ced60974f 100644 --- a/content/en/blog/general/introducing-reports-server/index.md +++ b/content/en/blog/general/introducing-reports-server/index.md @@ -121,10 +121,6 @@ The manifest will install the following components: Reports server comes with a PostgreSQL database, but you may opt for finer control of the database configuration by bringing your own database. See the [database configuration guide](https://github.com/kyverno/reports-server/blob/main/docs/DBCONFIG.md) for more details. -### Migration - -If you already have the PolicyReport CRD installed in your cluster, you will have an existing API service managed by kube-aggregator that sends requests to the Kubernetes API server. You will have to update the existing API service to send request to reports server. See the [migration guide](https://github.com/kyverno/reports-server/blob/main/docs/MIGRATION.md) for more details. - ## Conclusion In this short blog post, we demonstrated how reports server can be used to store policy reports. diff --git a/content/en/blog/general/slsa-3/index.md b/content/en/blog/general/slsa-3/index.md index 045152b79..461f44347 100644 --- a/content/en/blog/general/slsa-3/index.md +++ b/content/en/blog/general/slsa-3/index.md @@ -154,7 +154,7 @@ The GitHub Action reusable workflow hosted by the SLSA GitHub Generator project The build service's users can not falsify the provenance. **Kyverno Processes:** -GitHub takes care of avoiding interference with the build system. GitHub uses ephemeral and isolated virtual machines, no one can persistently compromise this environment. GitHub automatically provisions a new VM for that job. When the job execution is finished, the VM is automatically decommissioned. Use of the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) separates the signing from building so the Kyverno build itself never has access to the signing secrets. Use of OIDC-based secrets through Sigstore's [keyless signing](https://docs.sigstore.dev/signing/overview/) means the ephemeral signing secret is associated only with one specific build making it easy to detect secret theft and an attempt at signing something else. +GitHub takes care of avoiding interference with the build system. GitHub uses ephemeral and isolated virtual machines, so no one can persistently compromise this environment. GitHub automatically provisions a new VM for that job. When the job execution is finished, the VM is automatically decommissioned. Use of the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) separates the signing from building so the Kyverno build itself never has access to the signing secrets.
Use of OIDC-based secrets through Sigstore's [keyless signing](https://docs.sigstore.dev/cosign/signing/overview/) means the ephemeral signing secret is associated only with one specific build making it easy to detect secret theft and an attempt at signing something else. ### Provenance Content Requirements diff --git a/content/en/blog/general/why-chainsaw-is-unique/index.md b/content/en/blog/general/why-chainsaw-is-unique/index.md index b38450952..a374c948b 100644 --- a/content/en/blog/general/why-chainsaw-is-unique/index.md +++ b/content/en/blog/general/why-chainsaw-is-unique/index.md @@ -120,14 +120,14 @@ spec: template: spec: (containers[?securityContext == null]): - (len(@)): 0 + (length(@)): 0 ``` In the assertion above, the first three fields `spec`, `template`, and `spec` are basic projections that simply take the content of their respective fields and pass that to descendants. -`(containers[?securityContext == null])` is a JMESPath expression filtering the `containers` array, selecting only the element where `securityContext` is `null`. This projection results in a new array that is passed to the descendant (`(len(@))` in this case). +`(containers[?securityContext == null])` is a JMESPath expression filtering the `containers` array, selecting only the element where `securityContext` is `null`. This projection results in a new array that is passed to the descendant (`(length(@))` in this case). -`(len(@))` is another JMESPath expression that computes the length of the array. There's no more descendant at this point. We're at a leaf of the YAML tree and the array length we just computed is then compared against 0. +`(length(@))` is another JMESPath expression that computes the length of the array. There's no more descendant at this point. We're at a leaf of the YAML tree and the array length we just computed is then compared against 0. If the comparison matches, the assertion will be considered valid; if not, it will be considered failed. diff --git a/content/en/blog/releases/1-10-0/index.md b/content/en/blog/releases/1-10-0/index.md index 45308e915..598dd1f8f 100644 --- a/content/en/blog/releases/1-10-0/index.md +++ b/content/en/blog/releases/1-10-0/index.md @@ -158,6 +158,6 @@ For these breaking changes, and others, please carefully read the extensive and ## Closing -Kyverno 1.10 is quite the loaded release as you can probably see. After about four months and close to 500 PRs, there were a tremendous number of changes from the Kyverno community. And if you were one of the many, many contributors who pitched in to make this release a reality, a hearty THANK YOU for all your work! Hopefully what you've seen makes you excited to try out 1.10 for yourself. Come engage with us in the Kyverno channel on [Kubernetes Slack](/community/#slack-channel), attend one of our [community meetings](/community/#community-meetings), or just catch us on [Twitter](https://twitter.com/kyverno). +Kyverno 1.10 is quite the loaded release as you can probably see. After about four months and close to 500 PRs, there were a tremendous number of changes from the Kyverno community. And if you were one of the many, many contributors who pitched in to make this release a reality, a hearty THANK YOU for all your work! Hopefully what you've seen makes you excited to try out 1.10 for yourself. 
Come engage with us in the Kyverno channel on [Kubernetes Slack](../../../community/_index.md#slack-channel), attend one of our [community meetings](../../../community/_index.md#meetings), or just catch us on [Twitter](https://twitter.com/kyverno). And if you're already a Kyverno adopter, sign up to be an official adopter by updating the Adopters form [here](https://github.com/kyverno/kyverno/blob/main/ADOPTERS.md). diff --git a/content/en/blog/releases/1-9-0/index.md b/content/en/blog/releases/1-9-0/index.md index 4f5b32268..2c51e3dd8 100644 --- a/content/en/blog/releases/1-9-0/index.md +++ b/content/en/blog/releases/1-9-0/index.md @@ -140,4 +140,4 @@ One change we do want to make you aware of, which actually came in 1.8.3, which ## Closing -With so many new features, enhancements, and well over 200 fixes, there's so much to get excited about in this release. Hopefully what you've seen makes you excited to try out the 1.9 release and provide your feedback. Come engage with us in the Kyverno channel on [Kubernetes Slack](../../../community/_index.md#slack-channel), attend one of our [community meetings](../../../community/_index.md#community-meetings), or just catch us on [Twitter](https://twitter.com/kyverno). +With so many new features, enhancements, and well over 200 fixes, there's so much to get excited about in this release. Hopefully what you've seen makes you excited to try out the 1.9 release and provide your feedback. Come engage with us in the Kyverno channel on [Kubernetes Slack](../../../community/_index.md#slack-channel), attend one of our [community meetings](../../../community/_index.md#meetings), or just catch us on [Twitter](https://twitter.com/kyverno). diff --git a/content/en/community/_index.md b/content/en/community/_index.md index 9a728f447..e23df12e0 100644 --- a/content/en/community/_index.md +++ b/content/en/community/_index.md @@ -10,36 +10,30 @@ The [Kyverno source code](https://github.com/kyverno/kyverno/) and project artif ## Slack Channel -Join the Kubernetes Slack workspace at [https://slack.k8s.io/](https://slack.k8s.io/) and then search for the [#kyverno](https://slack.k8s.io/#kyverno) channel. +Kyverno maintains a thriving community with two different opportunities to participate. The largest is the [Kubernetes Slack workspace](https://slack.k8s.io/#kyverno), where end-users engage in the [#kyverno](https://slack.k8s.io/#kyverno) channel and contributors collaborate in the [#kyverno-dev](https://slack.k8s.io/#kyverno-dev) channel. The other is the [CNCF Slack workspace](https://cloud-native.slack.com/#kyverno), where the [#kyverno](https://slack.k8s.io/#kyverno) channel is dedicated to end-user interactions. -If you already have access to the Kubernetes Slack workspace simply select "sign in" at [https://slack.k8s.io/#kyverno](https://slack.k8s.io/#kyverno). +## Meetings -## Community Meetings +The Kyverno project holds two weekly meetings: -To attend our community meetings, join the [Kyverno group](https://groups.google.com/g/kyverno). You will then be sent a meeting invite and will have access to the agenda and meeting notes. Any member may suggest topics for discussion. +### Community Meeting -### Nirmata Office Hours +This is a public, weekly meeting for the full Kyverno community. First-time participants, new contributors, or anyone interested in Kyverno are welcome! This forum allows community members to propose agenda items of any sort, including but not limited to releases, roadmap, any contributor PRs or issues on which they are working.
- -This is a monthly meeting for the broader community where the Kyverno maintainers from [Nirmata](https://nirmata.com/) cover one or more topics with preference given to open community discussion, question and answer, etc. Meeting is live streamed on YouTube with recordings available after. Either join in person or attend in view-only mode: +- Weekly every Wednesday at 9:00 AM PST +- [Agenda and meeting notes](https://docs.google.com/document/d/1kFd4fpAoHS56mRHr73AZp9wknk1Ehy_hTB_KA7gJuy0/) -- Monthly on the second Thursday at 7:00 AM PST -- [Repo](https://github.com/nirmata/office-hours-for-kyverno) +To attend our community meetings, join the [Kyverno group](https://groups.google.com/g/kyverno). You will then be sent a meeting invite and will have access to the agenda and meeting notes. Any member may suggest topics for discussion. ### Maintainers Meeting +This is a public, weekly meeting for maintainers to discuss issues and PRs pertaining to Kyverno's development and roadmap. -This is a public, weekly meetings for maintainers to discuss issues and PRs pertaining to Kyverno's development and direction. Contributors and end users are welcome to attend and participate but may not raise agenda items. Topics to be proposed by non-maintainers should use the Kyverno Community Meeting. +Topics are proposed by maintainers. All in the community are welcome to attend, but non-maintainers may not propose new agenda items in this forum (they can instead add them to the [community meeting](#community-meeting) agenda). - Weekly every Tuesday at 7:30 AM PST - [Agenda and meeting notes](https://docs.google.com/document/d/1I_GWsz32gLw8sQyuu_Wv0-WQrtRLjn9FuX2KGNkvUY4/edit?usp=sharing) -### Community Meeting - -This is a public, weekly for Kyverno maintainers to make announcements and provide project updates, and request input and feedback. This forum allows community members to raise agenda items of any sort, including but not limited to any PRs or issues on which they are working. - -- Weekly every Wednesday at 9:00 AM PST -- [Agenda and meeting notes](https://docs.google.com/document/d/1kFd4fpAoHS56mRHr73AZp9wknk1Ehy_hTB_KA7gJuy0/) - ## Get in touch If you are unable to attend a community meeting, feel free to reach out anytime on the [Kyverno Slack channel in the Kubernetes workspace](https://slack.k8s.io/#kyverno), or the Kyverno [mailing list](https://groups.google.com/g/kyverno). @@ -50,7 +44,13 @@ We love hearing from our community! Thanks for your interest in contributing! We welcome all types of contributions and encourage you to read our [contribution guidelines](https://github.com/kyverno/kyverno/blob/main/CONTRIBUTING.md) for next steps. -The project contributors use a combination of [GitHub discussions](https://github.com/kyverno/kyverno/discussions), [GitHub Wiki](https://github.com/kyverno/kyverno/wiki) for design documents, and the [#kyverno-dev Slack channel](https://kubernetes.slack.com/archives/C032MM2CH7X) for notes on the development environment, project guidelines, and best practices. +The project contributors use a combination of [GitHub discussions](https://github.com/kyverno/kyverno/discussions) and the [#kyverno-dev Slack channel](https://kubernetes.slack.com/archives/C032MM2CH7X) for notes on the development environment, project guidelines, and best practices. + +Developer documentation is available in the [DEVELOPMENT.md](https://github.com/kyverno/kyverno/blob/main/DEVELOPMENT.md) file.
+ +**NOTE:** Developer documentation was previously available in the ([GitHub Wiki](https://github.com/kyverno/kyverno/wiki)) but is no longer maintained there. + +To start contributing, take a look at available issues in the [Good First Issues](https://github.com/orgs/kyverno/projects/10) project. ## Join Kyverno Adopters @@ -62,162 +62,4 @@ To participate, fill out the [Kyverno adopters form](https://forms.gle/K5CApcBAD ## Project Governance -This document highlights the roles and responsibilities for the Kyverno community members. It also outlines the requirements for anyone who is looking to take on leadership roles in the Kyverno project. - -**Note:** Please make sure to read the CNCF [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). - -### Project Roles - -The table below summarizes project roles and responsibilities. Details are provided in the sections following the table: - - -| Role | Requirements | Ongoing Responsibilities | Defined by | -| ------------ | --------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | -| Contributors | At least five (5) contributions to the project. | None | CONTRIBUTORS.md | -| Code Owner | At least ten (10) significant contributions and appointed by 2 maintainers. Highly experienced and active reviewer + contributor to a subproject. | Active contributions, assist maintainers, review and approve contributions.| Maintainers, [CODEOWNERS](https://help.github.com/en/articles/about-code-owners), GitHub organization member. | -| Maintainer | Highly experienced and active contributor + Kyverno Certification + Voted in by Kyverno maintainers. | Code Owner, monitor project growth, set direction and priorities for a subproject. | Voted in by the Kyverno maintainers, listing in `MAINTAINERS.md`, GitHub organization member, and repository owner. | - -#### Contributors - -Contributors are individuals who have made at least five (5) contributions to the project; by authoring PRs, commenting on issues and pull requests, and participating in community discussions on Slack or the mailing list. - -**Checklist before becoming a Contributor** - -- Have at least five (5) PRs successfully merged for any repositories under the Kyverno organization -- Member of the kyverno channel on Kubernetes and/or CNCF Slack -- Attended one (1) Contributors Meeting as documented -- Registered for the Kyverno [mailing list](https://groups.google.com/g/kyverno) - -**Privileges of a Contributor** - -- Listed in the file in at least one (1) organization repository -- Kyverno contributor badge issued - -To join the Kyverno project as a Contributor create a Pull Request (PR) in the [Kyverno repository](https://github.com/kyverno/kyverno) with the following: -1. Changes to add yourself to the [CONTRIBUTORS.md](https://github.com/kyverno/kyverno/blob/main/CONTRIBUTORS.md) file. -2. Links to your prior contributions (at least five). -3. Links to slack discussions, issue comments, etc. - -#### Code Owners - -Code Owners are a special type of contributor and have _significantly_ contributed and maintain an _active_ status within the organization. They can have issues and PRs assigned to them and are responsible for providing PR reviews. 
Unlike Contributors, Code Owners have responsibilities and must maintain an active status defined below to remain a Code Owner. - -**Checklist before becoming a Code Owner** - -- Have at least ten (10) significant PRs successfully merged for any combination of repositories under the Kyverno organization -- Member of the kyverno channel on Kubernetes and/or CNCF Slack -- Attended five (5) Contributors Meetings as documented -- Registered for the Kyverno [mailing list](https://groups.google.com/g/kyverno) -- Create a pull request to add self to `CODEOWNERS` file in at least one (1) repository -- Attained a minimum of two (2) positive votes from maintainers -- Respond to reviews from maintainers on pull requests - -**Responsibilities of a Code Owner** - -- Maintain an active status in a three (3) month period to include any of the following: - - One (1) PR filed - - Any request for PR review responded to - - One (1) issue or PR responded to - - One (1) Slack thread responded to - - Two (2) attendance at weekly Contributors Meetings - -**Privileges of a Code Owner** - -- Listed as an organization member -- Listed in `CODEOWNERS` in at least one (1) repository -- Kyverno contributor badge issued -- Have issues assigned to them -- Have PRs assigned to them - -**On-boarding Criteria** - -- Voted in by a majority of current maintainers, raised in a PR by the proposed member to add themselves to `CODEOWNERS`, during a voting period lasting seven (7) days - -**Off-boarding Criteria** - -- Voted out by a majority of current maintainers via a GitHub issue during a voting period lasting seven (7) days. A vote may be called by any maintainer after the point at which the responsibilities have not been met. A positive vote will result in removal from `CODEOWNERS` and from organization membership. - -#### Maintainers - -Maintainers are individuals who go beyond the status of code owner who have shown good technical judgement in feature design/development in the past. Maintainers have overall knowledge of the project and features in the project. They can read, clone, and push to the repository. They can also manage issues, pull requests, and some repository settings. - -[Maintainers](https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization#repository-access-for-each-permission-level) are the technical authority for a subproject and are considered leaders for the organization as a whole. They must have demonstrated both good judgement and responsibility towards the health of the subproject. Maintainers must set technical direction and make or approve design decisions for their subproject, either directly or through delegation of these responsibilities. Unlike contributors and code owners, maintainers have the highest degree of responsibility and ownership for the project. Maintainer status may be subject to a vote and, if the minimum level of activity is not maintained, may be moved to an _emeritus_ status. 
- -**Checklist before becoming a Maintainer:** - -- Proficient in GitHub, YAML, Markdown, and Git -- Exhibits strong attention to detail when reviewing commits and provides generous guidance and feedback -- Helps others achieve their goals with open-source and community contributions -- Understands the workflow of the Issues and Pull Requests -- Makes consistent contributions to the Kyverno project -- Consistently initiates and participates in [Kyverno discussions](https://slack.k8s.io/#kyverno) -- Has knowledge and interest that aligns with the overall project goals, specifications, and design principles of the Kyverno project -- Makes contributions that are considered notable -- Demonstrates ability to help troubleshoot and resolve user issues -- Has achieved the Kyverno Certification or demonstrated an equivalent mastery of Kyverno -- Meets or exceeds all the requirements of a Code Owner -- Maintains an active status as a Code Owner for a period of six (6) months - -**Responsibilities of a Maintainer** - -The following responsibilities apply to the subproject for which one would be an owner and maintainer. - -- All the responsibilities of a Code Owner -- Tracks and ensures adequate health of the modules and subprojects they are in charge of -- Ensures adequate test coverage to confidently release new features and fixes -- Ensures that tests are passing reliably (i.e. not flaky) and are fixed when they fail -- Mentors and guides code owners, reviewers, and contributors -- Actively participates in the processes for discussion and decision making in the project -- Merges Pull Requests and helps prepare releases -- Makes and approves technical design decisions for the subproject -- Helps define milestones and releases -- Decides on when PRs are merged to control the release scope -- Works with other maintainers to maintain the project's overall health and success holistically - -**Privileges of a Maintainer** - -- Privileges of a Code Owner -- Receives a Kyverno Maintainer Badge -- Listed in `MAINTAINERS.md` - -**On-boarding Criteria** - -- Voted in by a majority of current maintainers, raised in a PR by the proposed member to add themselves to `MAINTAINERS.md`, during a voting period lasting seven (7) days - -**Off-boarding Criteria** - -An off-boarding vote may be called by any maintainer if any of the following criteria are met: -- A maintainer has made less than 30 contributions over a span of 6 months. - - Contributions can be tracked using the [DevStats dashboard](https://kyverno.devstats.cncf.io/d/66/developer-activity-counts-by-companies?orgId=1&var-period_name=Last%206%20months&var-metric=contributions&var-repogroup_name=All&var-country_name=All&from=1522810884223&to=1680577284223&var-companies=All). - - Other relevant data will be collected and evaluated to assess the maintainer's contributions. This includes their involvement in discussions, conversations on Slack, and any other relevant interactions. - - -The off-boarding process includes the following steps: -- The off-boarding process is initiated by any currently active maintainer who conducts a review of the maintainers list and proceeds to initialize the off-boarding process if the above criteria are met. -- The plans of off-boarding process is sent in a private Slack message or email to the candidate. -- If the candidate for removal states plans to continue participating, another 6 months will be granted to the candidate to make contributions and the new cycle starts. No action is taken and this process terminates. 
-- If the candidate fails to meet the criteria during the second attempt to make contributions, the off-boarding process continues. -- A pull request (PR) proposing movement of the candidate is sent, initiating the public voting phase. -- The vote passes if a majority of current maintainers vote yes during a voting period lasting seven (7) days. -- A positive vote will result in movement to an _emeritus_ status within `MAINTAINERS.md` and removal from organization membership. - - -#### Admins - -These are persons who have full access to the project, including sensitive and destructive actions like managing security or deleting a repository. Admins can read, clone, and push to this repository. They can also manage issues, pull requests, and repository settings, including adding collaborators. - - -#### Mapping Project Roles to GitHub Roles - -The roles used in this document are custom roles mapped according to the [GitHub roles and responsibilities](https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization). - -| Project Role | GitHub Role | -| -------------- | -------------- | -| Contributor | Triage | -| Code Owner | Write | -| Maintainer | Maintain | -| Administrator | Admin | - -### Off-boarding Guidance - -If any of the above roles hasn't contributed in any phases (including, but not limited to: code changes, doc updates, issue discussions) in 3 months, the administrator needs to inform the member and remove one's roles and GitHub permissions. \ No newline at end of file +[Kyverno and its sub-projects](https://github.com/kyverno#projects) follow the governance published and maintained at https://github.com/kyverno/community/blob/main/GOVERNANCE.md. diff --git a/content/en/docs/CRDs/_index.md b/content/en/docs/CRDs/_index.md index 03a2f0c5e..449412812 100644 --- a/content/en/docs/CRDs/_index.md +++ b/content/en/docs/CRDs/_index.md @@ -43,9 +43,7 @@ FIELDS: each rule can validate, mutate, or generate resources. schemaValidation - SchemaValidation skips policy validation checks. Optional. The default - value is set to "true", it must be set to "false" to disable the validation - checks. + Deprecated. validationFailureAction ValidationFailureAction controls if a validation policy rule failure should diff --git a/content/en/docs/installation/_index.md b/content/en/docs/installation/_index.md index f1448d3ca..acdeaf3be 100644 --- a/content/en/docs/installation/_index.md +++ b/content/en/docs/installation/_index.md @@ -49,6 +49,7 @@ Kyverno follows the same support policy as the Kubernetes project (N-2 policy) i | 1.10.x | 1.24 | 1.26 | | 1.11.x | 1.25 | 1.28 | | 1.12.x | 1.26 | 1.29 | +| 1.13.x | 1.28 | 1.31 | \* Due to a known issue with Kubernetes 1.23.0-1.23.2, support for 1.23 begins at 1.23.3. diff --git a/content/en/docs/installation/customization.md b/content/en/docs/installation/customization.md index 293cd03f2..d6323fb86 100644 --- a/content/en/docs/installation/customization.md +++ b/content/en/docs/installation/customization.md @@ -61,13 +61,13 @@ At a minimum, managed certificates are checked for validity every 12 hours. Addi The renewal process runs as follows: 1. Remove expired certificates contained in the secret -1. Check if remaining certificates will become invalid in less than 60 hours +1. Check if remaining certificates will become invalid in less than 15 days 1. If needed, generate a new certificate with the validity documented above 1. 
The new certificates is added to the underlying secret along with current certificatess that are still valid 1. Reconfigure webhooks with the new certificates bundle 1. Update the Kyverno server to use the new certificate -Basically, certificates will be renewed approximately 60 hours before expiry. +Basically, certificates will be renewed approximately 15 days before expiry. #### Custom certificates @@ -153,10 +153,14 @@ Kyverno uses Secrets created above to setup TLS communication with the Kubernete You can now install Kyverno by selecting one of the available methods from the [installation section](methods.md). -### Roles and Permissions +### Role Based Access Controls -Kyverno creates several Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings some of which may need to be customized depending on additional functionality required. To view all ClusterRoles and Roles associated with Kyverno, use the command `kubectl get clusterroles,roles -A | grep kyverno`. +Kyverno uses Kubernetes Role Based Access Controls (RBAC) to configure permissions for Kyverno controllers to allow access to other resources. +Kyverno creates several Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings some of which may need to be customized depending on additional functionality required. + +To view all ClusterRoles and Roles associated with Kyverno, use the command `kubectl get clusterroles,roles -A | grep kyverno`. + #### Roles Kyverno creates the following Roles in its Namespace, one per controller type: @@ -183,33 +187,53 @@ Kyverno uses [aggregated ClusterRoles](https://kubernetes.io/docs/reference/acce The following `ClusterRoles` provide Kyverno with permissions to policies and other Kubernetes resources across all Namespaces. -* `kyverno:admission-controller:core`: aggregate ClusterRole for the admission controller -* `kyverno:admission-controller`: aggregated (top-level) ClusterRole for the admission controller -* `kyverno:reports-controller:core`: aggregate ClusterRole for the reports controller -* `kyverno:reports-controller`: aggregated (top-level) ClusterRole for the reports controller -* `kyverno:background-controller:core`: aggregate ClusterRole for the background controller -* `kyverno:background-controller`: aggregated (top-level) ClusterRole for the background controller -* `kyverno:cleanup-controller:core`: aggregate ClusterRole for the cleanup controller -* `kyverno:cleanup-controller`: aggregated (top-level) ClusterRole for the cleanup controller -* `kyverno-cleanup-jobs`: used by the helper CronJob to periodically remove excessive/stale admission reports if found -* `kyverno:rbac:admin:policies`: aggregates to admin the ability to fully manage Kyverno policies -* `kyverno:rbac:admin:policyreports`: aggregates to admin the ability to fully manage Policy Reports -* `kyverno:rbac:admin:reports`: aggregates to admin the ability to fully manage intermediary admission and background reports -* `kyverno:rbac:admin:updaterequests`: aggregates to admin the ability to fully manage UpdateRequests, intermediary resource for generate rules -* `kyverno:rbac:view:policies`: aggregates to view the ability to view Kyverno policies -* `kyverno:rbac:view:policyreports`: aggregates to view the ability to view Policy Reports -* `kyverno:rbac:view:reports`: aggregates to view the ability to view intermediary admission and background reports -* `kyverno:rbac:view:updaterequests`: aggregates to view the ability to view UpdateRequests, intermediary resource for generate rules +Role Binding | Service 
Account | Role +---------------------------------- | ------------------------------------ | ------------- +kyverno:admission-controller | kyverno-admission-controller | kyverno:admission-controller +kyverno:admission-controller:view | kyverno-admission-controller | view +kyverno:admission-controller:core | -- | -- +kyverno:background-controller | kyverno-background-controller | kyverno:background-controller +kyverno:background-controller:view | kyverno-background-controller | view +kyverno:background-controller:core | -- | -- +kyverno:cleanup-controller | kyverno-cleanup-controller | kyverno:cleanup-controller +kyverno:cleanup-controller:core | -- | -- +kyverno:reports-controller | kyverno-reports-controller | kyverno:reports-controller +kyverno:reports-controller:view | kyverno-reports-controller | view +kyverno:reports-controller:core | -- | -- + {{% alert title="Note" color="info" %}} -Most Kyverno controllers' ClusterRoles include a rule which allows for `get`, `list`, and `read` permissions to all resources in the cluster. This is to ensure Kyverno functions smoothly despite the type and subject of future-installed policies. If this rule is removed, users must manually create and manage a number of different ClusterRoles applicable across potentially multiple controllers depending on the type and configuration of installed policies. +The Kyverno admission, background, and reports controller have a role binding to the built-in `view` role. This allows these Kyverno controllers view access to most namespaced resources. You can customize this role during Helm installation using variables like `admissionController.rbac.viewRoleName`. {{% /alert %}} #### Customizing Permissions -Because the ClusterRoles used by Kyverno use the [aggregation feature](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles), extending the permission for Kyverno's use in cases like mutate existing or generate rules or generating ValidatingAdmissionPolicies is a simple matter of creating one or more new ClusterRoles which use the appropriate labels. It is not necessary to modify any existing ClusterRoles created as part of the Kyverno installation. Doing so is not recommended as changes may be lost during an upgrade. Since there are multiple controllers each with their own ServiceAccount, granting Kyverno additional permissions involves identifying the correct controller and using the labels needed to aggregate to that ClusterRole. +Kyverno's default permissions are designed to cover commonly used and security non-critical resources. Hence, Kyverno will need to be configured with additional permissions for CRDs, or to allow access to security critical resources. + +The ClusterRoles installed by Kyverno use the [cluster role aggregation feature](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles), making it easy to extend the permissions for Kyverno's controllers. 
To extend a controller's permissions, add a new role with one or more of the following labels: + +Controller | Role Aggregation Label +---------------------- | ----------------------------- +admission-controller | rbac.kyverno.io/aggregate-to-admission-controller: "true" +background-controller | rbac.kyverno.io/aggregate-to-background-controller: "true" +reports-controller | rbac.kyverno.io/aggregate-to-reports-controller: "true" +cleanup-controller | rbac.kyverno.io/aggregate-to-cleanup-controller: "true" + +To avoid upgrade issues, it is highly recommended that the default roles are not modified; instead, create new roles to extend them. -For example, if a new Kyverno generate policy requires that Kyverno be able to create or modify Deployments, this is not a permission Kyverno has by default. Generate rules are handled by the background controller and so it will be necessary to create a new ClusterRole and assign it the aggregation labels specific to the background controller in order for those permissions to take effect. +Since there are multiple controllers each with their own ServiceAccount, granting Kyverno additional permissions involves identifying the correct controller and using the labels needed to aggregate to that ClusterRole. The table below identifies required permissions for Kyverno features: + +Controller | Permission Verbs | Required For +---------------------- | ----- | ------------------------------- +admission-controller | view, list, ... | API Calls +admission-controller | view, list, watch | Global Context +background-controller | update, view, list, watch | Mutate Policies +background-controller | create, update, delete, view, list, watch | Generate Policies +reports-controller | view, list, watch | Policy Reports +cleanup-controller | delete, view, list, watch | Cleanup Policies + + +For example, if a new Kyverno generate policy requires that Kyverno be able to create and update Deployments, new permissions need to be provided. Generate rules are handled by the background controller and so it will be necessary to create a new ClusterRole and assign it the aggregation labels specific to the background controller in order for those permissions to take effect. This sample ClusterRole provides the Kyverno background controller additional permissions to create Deployments: @@ -217,11 +241,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - labels: - app.kubernetes.io/component: background-controller - app.kubernetes.io/instance: kyverno - app.kubernetes.io/part-of: kyverno name: kyverno:create-deployments + labels: + rbac.kyverno.io/aggregate-to-background-controller: "true" rules: - apiGroups: - apps @@ -229,15 +251,51 @@ rules: - deployments verbs: - create + - update ``` -Once a supplemental ClusterRole has been created, get the top-level ClusterRole for that controller to ensure aggregation has occurred. +Once a supplemental ClusterRole has been created, check the top-level ClusterRole for that controller to ensure aggregation has occurred. ```sh kubectl get clusterrole kyverno:background-controller -o yaml ```
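For reference, here is a sketch of the kind of generate rule that would exercise these `create` and `update` permissions (all names and the generated spec are illustrative):

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: generate-default-deployment # illustrative name
spec:
  rules:
  - name: create-deployment
    match:
      any:
      - resources:
          kinds:
          - Namespace
    generate:
      apiVersion: apps/v1
      kind: Deployment
      name: placeholder-app
      namespace: "{{request.object.metadata.name}}"
      synchronize: true # keeping the resource in sync is what requires the update verb
      data:
        spec:
          replicas: 1
          selector:
            matchLabels:
              app: placeholder-app
          template:
            metadata:
              labels:
                app: placeholder-app
            spec:
              containers:
              - name: app
                image: nginx:1.27 # illustrative image
```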
In this scenario, a ClusterRole should be created and assigned the aggregation labels for the admission controller in order for those permissions to take effect.
+Similarly, if Kyverno validate or mutate policies operate on a custom resource, the background and reports controllers need to be granted permissions to manage that resource:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kyverno:crontab:edit
+  labels:
+    rbac.kyverno.io/aggregate-to-background-controller: "true"
+rules:
+- apiGroups:
+  - stable.example.com
+  resources:
+  - crontabs
+  verbs:
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kyverno:crontab:view
+  labels:
+    rbac.kyverno.io/aggregate-to-background-controller: "true"
+    rbac.kyverno.io/aggregate-to-reports-controller: "true"
+rules:
+- apiGroups:
+  - stable.example.com
+  resources:
+  - crontabs
+  verbs:
+  - get
+  - list
+  - watch
+```
+
+Generating Kubernetes ValidatingAdmissionPolicies and their bindings is handled by the admission controller, and it is necessary to grant the controller the required permissions to generate these types. For this, a ClusterRole should be created and assigned the aggregation labels for the admission controller in order for those permissions to take effect.
 
 This sample ClusterRole provides the Kyverno admission controller additional permissions to create ValidatingAdmissionPolicies and ValidatingAdmissionPolicyBindings:
 
@@ -245,11 +303,9 @@ This sample ClusterRole provides the Kyverno admission controller additional per
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  labels:
-    app.kubernetes.io/component: admission-controller
-    app.kubernetes.io/instance: kyverno
-    app.kubernetes.io/part-of: kyverno
   name: kyverno:generate-validatingadmissionpolicy
+  labels:
+    rbac.kyverno.io/aggregate-to-admission-controller: "true"
 rules:
 - apiGroups:
   - admissionregistration.k8s.io
@@ -257,10 +313,12 @@ rules:
   - validatingadmissionpolicies
   - validatingadmissionpolicybindings
   verbs:
+  - get
+  - list
+  - watch
   - create
   - update
   - delete
-  - list
 ```
 
 ### ConfigMap Keys
 
@@ -276,9 +334,10 @@ The following keys are used to control the behavior of Kyverno and must be set i
 7. `generateSuccessEvents`: specifies whether (true/false) to generate success events. Default is set to "false".
 8. `matchConditions`: uses CEL-based expressions in the webhook configuration to narrow which admission requests are forwarded to Kyverno. Requires Kubernetes 1.27+ with the `AdmissionWebhookMatchConditions` feature gate to be enabled.
 9. `resourceFilters`: Kubernetes resources in the format "[kind,namespace,name]" where the policy is not evaluated by the admission webhook. For example --filterKind "[Deployment, kyverno, kyverno]" --filterKind "[Deployment, kyverno, kyverno],[Events, *, *]". Note that resource filters do not apply to background scanning mode. See the [Resource Filters](#resource-filters) section for more complete information.
-10. `webhooks`: specifies the Namespace or object exclusion to configure in the webhooks managed by Kyverno. Default is `'[{"namespaceSelector": {"matchExpressions": [{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["kyverno"]}]}}]'`.
-11. `webhookAnnotations`: instructs Kyverno to add annotations to its webhooks for AKS support. Default is undefined. See the [AKS notes](platform-notes.md#notes-for-aks-users) section for details.
-12. `webhookLabels`: instructs Kyverno to add labels to its webhooks. Default is undefined.
+10. `updateRequestThreshold`: sets the threshold for the total number of UpdateRequests generated for mutateExisting and generate policies. The value is provided as a string; the default is 1000.
+11. `webhooks`: specifies the Namespace or object exclusion to configure in the webhooks managed by Kyverno. Default is `'[{"namespaceSelector":{"matchExpressions":[{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["kube-system"]},{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["kyverno"]}],"matchLabels":null}}]'`.
+12. `webhookAnnotations`: instructs Kyverno to add annotations to its webhooks for AKS support. Default is undefined. See the [AKS notes](platform-notes.md#notes-for-aks-users) section for details.
+13. `webhookLabels`: instructs Kyverno to add labels to its webhooks. Default is undefined.
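+
+These keys are set under the `data` stanza of the Kyverno ConfigMap (named `kyverno` in the `kyverno` Namespace by default). As a minimal sketch, the following excludes an additional, illustrative `platform-system` Namespace from webhook processing; when installing via Helm, prefer setting the corresponding `config` chart values so the change is not overwritten on upgrade:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kyverno
+  namespace: kyverno
+data:
+  # exclude kube-system, kyverno, and the illustrative platform-system Namespace
+  webhooks: '[{"namespaceSelector":{"matchExpressions":[{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["kube-system","kyverno","platform-system"]}]}}]'
+```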
 
 ### Container Flags
 
@@ -294,7 +353,8 @@ The following flags can be used to control the advanced behavior of the various
 | `add_dir_header` (ABCR) | | Adds the file directory to the header of the log messages. |
 | `admissionReports` (AR) | true | Enables the AdmissionReport resource which is created from validate rules in `Audit` mode. Used to factor into a final PolicyReport. |
 | `aggregateReports` (R) | true | Enables the report aggregating ability of AdmissionReports (1.10.2+). |
-| `allowInsecureRegistry` (ABR)| | Allows Kyverno to work with insecure registries (i.e., bypassing certificate checks) either with [verifyImages](../writing-policies/verify-images/) rules or [variables from image registries](../writing-policies/external-data-sources.md#variables-from-image-registries). Only for testing purposes. Not to be used in production situations. |
+| `aggregationWorkers` (R) | 10 | Configures the number of internal worker threads used to perform reports aggregation (1.12.3+). |
+| `allowInsecureRegistry` (ABR)| `"false"` | Allows Kyverno to work with insecure registries (i.e., bypassing certificate checks) either with [verifyImages](../writing-policies/verify-images/) rules or [variables from image registries](../writing-policies/external-data-sources.md#variables-from-image-registries). Only for testing purposes. Not to be used in production situations. |
 | `alsologtostderr` (ABCR) | | Log to standard error as well as files (no effect when -logtostderr=true) |
 | `autoUpdateWebhooks` (A) | true | Set this flag to `false` to disable auto-configuration of the webhook. With this feature disabled, Kyverno creates a default webhook configuration (which matches ALL resources), therefore, webhooks configuration via the ConfigMap will be ignored. However, the user still can modify it by patching the webhook resource manually. Setting this flag to `false` after it has been set to `true` will retain existing webhooks and automatic updates will cease. All further changes will be manual in nature. If the webhook or webhook configuration resource is deleted, it will be replaced by one matching on a wildcard. |
 | `backgroundServiceAccountName` (A) | | The name of the background controller's ServiceAccount name allowing the admission controller to disregard any AdmissionReview requests coming from Kyverno itself. This may need to be removed in situations where, for example, Kyverno needs to mutate a resource it just generated. 
Default is set to the ServiceAccount for the background controller.| @@ -305,8 +365,8 @@ The following flags can be used to control the advanced behavior of the various | `cleanupServerPort` (C) | 9443 | Defines the port used by the cleanup server. Usually changed in tandem with `webhookServerPort`.| | `clientRateLimitBurst` (ABCR) | 300 | Configures the maximum burst for throttling. Uses the client default if zero. | | `clientRateLimitQPS` (ABCR) | 300 | Configures the maximum QPS to the API server from Kyverno. Uses the client default if zero. | -| `eventsRateLimitBurst` (ABCR) | 300 | Configures the maximum burst for throttling for events. Uses the client default if zero. | -| `eventsRateLimitQPS` (ABCR) | 300 | Configures the maximum QPS to the API server from Kyverno for events. Uses the client default if zero. | +| `eventsRateLimitBurst` (ABCR) | 2000 | Configures the maximum burst for throttling for events. Uses the client default if zero. | +| `eventsRateLimitQPS` (ABCR) | 1000 | Configures the maximum QPS to the API server from Kyverno for events. Uses the client default if zero. | | `disableMetrics` (ABCR) | false | Specifies whether to enable exposing the metrics. | | `dumpPayload` (AC) | false | Toggles debug mode. When debug mode is enabled, the full AdmissionReview payload is logged. Additionally, resources of kind Secret are redacted. Should only be used in policy development or troubleshooting scenarios, not left perpetually enabled. | | `enableConfigMapCaching` (ABR) | true | Enables the ConfigMap caching feature. | @@ -336,7 +396,7 @@ The following flags can be used to control the advanced behavior of the various | `maxAuditCapacity` (A) | `1000` | Maximum number of workers for audit policy processing. | | `maxQueuedEvents` (ABR) | `1000` | Defines the upper limit of events that are queued internally. | | `metricsPort` (ABCR) | `8000` | Specifies the port to expose prometheus metrics. | -| `omit-events` (ABR) | | Specifies the type of Kyverno events which should not be emitted. Accepts a comma-separated string with possible values `PolicyViolation`, `PolicyApplied`, `PolicyError`, and `PolicySkipped`. Default is undefined (all events will be emitted). | +| `omitEvents` (ABR) | `"PolicyApplied,PolicySkipped"` | Specifies the type of Kyverno events which should not be emitted. Accepts a comma-separated string with possible values `PolicyViolation`, `PolicyApplied`, `PolicyError`, and `PolicySkipped`. Default is `PolicyApplied` and `PolicySkipped`. | | `one_output` (ABCR) | | If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true). | | `otelCollector` (ABCR) | | Sets the OpenTelemetry collector service address. Kyverno will try to connect to this on the metrics port. Default is `opentelemetrycollector.kyverno.svc.cluster.local`. | | `otelConfig` (ABCR) | `prometheus` | Sets the preference for Prometheus or OpenTelemetry. Set to `grpc` to enable OpenTelemetry. | @@ -345,7 +405,7 @@ The following flags can be used to control the advanced behavior of the various | `profileAddress` (ABCR) | `""` | Configures the address of the profiling server. | | `profilePort` (ABCR) | `6060` | Specifies port to enable profiling. | | `protectManagedResources` (AC) | false | Protects the Kyverno resources from being altered by anyone other than the Kyverno Service Account. Set to `true` to enable. | -| `registryCredentialHelpers` (ABR) | | Enables cloud-registry-specific authentication helpers. 
Defaults to `"default,google,amazon,azure,github"`. |
+| `registryCredentialHelpers` (ABR) | `"default,google,amazon,azure,github"` | Enables cloud-registry-specific authentication helpers. Defaults to `"default,google,amazon,azure,github"`. |
 | `renewBefore` (AC) | `15d` | Sets the certificate renewal time before expiration (in days). |
 | `reportsChunkSize` (R) | `1000` | Maximum number of results in generated reports before splitting occurs if there are more results to be stored. Deprecated. |
 | `serverIP` (AC) | | Like the `kubeconfig` flag, used when running Kyverno outside of the cluster which it serves. |
diff --git a/content/en/docs/installation/methods.md b/content/en/docs/installation/methods.md
index 9c2db02b3..9f8e23eee 100644
--- a/content/en/docs/installation/methods.md
+++ b/content/en/docs/installation/methods.md
@@ -26,9 +26,13 @@ Optionally, show all available chart versions for Kyverno.
 helm search repo kyverno -l
 ```
 
-Choose one of the installation configuration options based upon your environment type and availability needs. For a production installation, see the [High Availability](#high-availability) section. For a non-production installation, see the [Standalone](#standalone) section below for additional details.
+Choose one of the installation configuration options based upon your environment type and availability needs.
+- For a production installation, see the [High Availability](#high-availability) section below.
+- For a non-production installation, see the [Standalone](#standalone) section below for additional details.
 
+{{% alert title="Note" color="warning" %}}
 When deploying Kyverno to certain Kubernetes platforms such as EKS, AKS, or OpenShift; or when using certain GitOps tools such as ArgoCD, additional configuration options may be needed or recommended. See the [Platform-Specific Notes](platform-notes.md) section for additional details.
+{{% /alert %}}
 
 After Kyverno is installed, you may choose to also install the Kyverno [Pod Security Standard policies](../../pod-security.md), an optional chart containing the full set of Kyverno policies which implement the Kubernetes [Pod Security Standards](https://kubernetes.io/docs/concepts/security/pod-security-standards/).
 
@@ -45,10 +49,15 @@ Since Kyverno is comprised of different controllers where each is contained in s
 The Helm chart offers parameters to configure multiple replicas for each controller. For example, a highly-available, complete deployment of Kyverno would consist of the following values.
 
 ```yaml
-admissionController.replicas: 3
-backgroundController.replicas: 2
-cleanupController.replicas: 2
-reportsController.replicas: 2
+admissionController:
+  replicas: 3
+backgroundController:
+  replicas: 2
+cleanupController:
+  replicas: 2
+reportsController:
+  replicas: 2
 ```
 
 For all of the available values and their defaults, please see the Helm chart [README](https://github.com/kyverno/kyverno/tree/release-1.10/charts/kyverno). You should carefully inspect all available chart values and their defaults to determine what overrides, if any, are necessary to meet the particular needs of your production environment.
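+
+For example, a minimal sketch of a highly-available install applying these replica counts with inline overrides (using the chart and release names from the installation steps above):
+
+```sh
+helm install kyverno kyverno/kyverno -n kyverno --create-namespace \
+  --set admissionController.replicas=3 \
+  --set backgroundController.replicas=2 \
+  --set cleanupController.replicas=2 \
+  --set reportsController.replicas=2
+```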
diff --git a/content/en/docs/introduction/_index.md b/content/en/docs/introduction/_index.md
index c43ce226c..492177616 100644
--- a/content/en/docs/introduction/_index.md
+++ b/content/en/docs/introduction/_index.md
@@ -3,299 +3,24 @@ title: "Introduction"
 linkTitle: "Introduction"
 weight: 10
 description: >
-  Learn about Kyverno and create your first policy through a Quick Start guide.
+  Learn about Kyverno and its powerful capabilities
 ---
 
 ## About Kyverno
 
-Kyverno (Greek for "govern") is a policy engine designed specifically for Kubernetes. Some of its many features include:
+Kyverno (Greek for "govern") is a cloud native policy engine. It was originally built for Kubernetes and can now also be used outside of Kubernetes clusters as a unified policy language.
 
-* policies as Kubernetes resources (no new language to learn!)
-* validate, mutate, generate, or cleanup (remove) any resource
-* verify container images for software supply chain security
-* inspect image metadata
-* match resources using label selectors and wildcards
-* validate and mutate using overlays (like Kustomize!)
-* synchronize configurations across Namespaces
-* block non-conformant resources using admission controls, or report policy violations
-* self-service reports (no proprietary audit log!)
-* self-service policy exceptions
-* test policies and validate resources using the Kyverno CLI, in your CI/CD pipeline, before applying to your cluster
-* manage policies as code using familiar tools like `git` and `kustomize`
+Kyverno allows platform engineers to automate security, compliance, and best practices validation and deliver secure self-service to application teams.
 
-Kyverno allows cluster administrators to manage environment specific configurations independently of workload configurations and enforce configuration best practices for their clusters. Kyverno can be used to scan existing workloads for best practices, or can be used to enforce best practices by blocking or mutating API requests.
+Some of its many features include:
 
-## How Kyverno works
+* policies as YAML-based declarative Kubernetes resources with no new language to learn!
+* enforce policies as a Kubernetes admission controller, CLI-based scanner, and at runtime
+* validate, mutate, generate, or cleanup (remove) any Kubernetes resource
+* verify container images and metadata for software supply chain security
+* policies for any JSON payload including Terraform resources, cloud resources, and service authorization
+* policy reporting using the open reporting format from the CNCF Policy WG
+* flexible policy exception management
+* tooling for comprehensive unit and e2e testing of policies
+* management of policies as code resources using familiar tools like `git` and `kustomize`
 
-Kyverno runs as a [dynamic admission controller](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) in a Kubernetes cluster. Kyverno receives validating and mutating admission webhook HTTP callbacks from the Kubernetes API server and applies matching policies to return results that enforce admission policies or reject requests.
-
-Kyverno policies can match resources using the resource kind, name, label selectors, and much more.
-
-Mutating policies can be written as overlays (similar to [Kustomize](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/#bases-and-overlays)) or as a [RFC 6902 JSON Patch](http://jsonpatch.com/). Validating policies also use an overlay style syntax, with support for pattern matching and conditional (if-then-else) processing.
-
-Policy enforcement is captured using Kubernetes events. For requests that are either allowed or existed prior to introduction of a Kyverno policy, Kyverno creates Policy Reports in the cluster which contain a running list of resources matched by a policy, their status, and more.
- -The diagram below shows the high-level logical architecture of Kyverno. - -Kyverno Architecture -

- -The **Webhook** is the server which handles incoming AdmissionReview requests from the Kubernetes API server and sends them to the **Engine** for processing. It is dynamically configured by the **Webhook Controller** which watches the installed policies and modifies the webhooks to request only the resources matched by those policies. The **Cert Renewer** is responsible for watching and renewing the certificates, stored as Kubernetes Secrets, needed by the webhook. The **Background Controller** handles all generate and mutate-existing policies by reconciling UpdateRequests, an intermediary resource. And the **Report Controllers** handle creation and reconciliation of Policy Reports from their intermediary resources, Admission Reports and Background Scan Reports. - -Kyverno also supports high availability. A highly-available installation of Kyverno is one in which the controllers selected for installation are configured to run with multiple replicas. Depending on the controller, the additional replicas may also serve the purpose of increasing the scalability of Kyverno. See the [high availability page](../high-availability/_index.md) for more details on the various Kyverno controllers, their components, and how availability is handled in each one. - -## Quick Start Guides - -This section is intended to provide you with some quick guides on how to get Kyverno up and running and demonstrate a few of Kyverno's seminal features. There are quick start guides which focus on validation, mutation, as well as generation allowing you to select the one (or all) which is most relevant to your use case. - -These guides are intended for proof-of-concept or lab demonstrations only and not recommended as a guide for production. Please see the [installation page](../installation/_index.md) for more complete information on how to install Kyverno in production. - -First, install Kyverno from the latest release manifest. - -```sh -kubectl create -f https://github.com/kyverno/kyverno/releases/download/v1.12.0/install.yaml -``` - -Next, select the quick start guide in which you are interested. Alternatively, start at the top and work your way down. - -### Validation - -In the validation guide, you will see how simple an example Kyverno policy can be which ensures a label called `team` is present on every Pod. Validation is the most common use case for policy and functions as a "yes" or "no" decision making process. Resources which are compliant with the policy are allowed to pass ("yes, this is allowed") and those which are not compliant may not be allowed to pass ("no, this is not allowed"). An additional effect of these validate policies is to produce Policy Reports. A [Policy Report](../policy-reports/_index.md) is a custom Kubernetes resource, produced and managed by Kyverno, which shows the results of policy decisions upon allowed resources in a user-friendly way. - -Add the policy below to your cluster. It contains a single validation rule that requires that all Pods have the `team` label. Kyverno supports different rule types to validate, mutate, generate, cleanup, and verify image configurations. The field `validationFailureAction` is set to `Enforce` to block Pods that are non-compliant. Using the default value `Audit` will report violations but not block requests. 
- -```yaml -kubectl create -f- << EOF -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - name: require-labels -spec: - validationFailureAction: Enforce - rules: - - name: check-team - match: - any: - - resources: - kinds: - - Pod - validate: - message: "label 'team' is required" - pattern: - metadata: - labels: - team: "?*" -EOF -``` - -Try creating a Deployment without the required label. - -```sh -kubectl create deployment nginx --image=nginx -``` - -You should see an error. - -```sh -error: failed to create deployment: admission webhook "validate.kyverno.svc-fail" denied the request: - -resource Deployment/default/nginx was blocked due to the following policies: - -require-labels: - autogen-check-team: 'validation error: label ''team'' is - required. Rule autogen-check-team failed at path /spec/template/metadata/labels/team/' -``` - -In addition to the error returned, Kyverno also produces an Event in the same Namespace which contains this information. - -{{% alert title="Note" color="info" %}} -Kyverno may be configured to exclude system Namespaces like `kube-system` and `kyverno`. Make sure you create the Deployment in a user-defined Namespace or the `default` Namespace (for testing only). -{{% /alert %}} - -Note that how although the policy matches on Pods, Kyverno blocked the Deployment you just created. This is because Kyverno intelligently applies policies written exclusively for Pods, using its [rule auto-generation](../writing-policies/autogen.md) feature, to all standard Kubernetes Pod controllers including the Deployment above. - -Now, create a Pod with the required label. - -```sh -kubectl run nginx --image nginx --labels team=backend -``` - -This Pod configuration is compliant with the policy and is allowed. - -Now that the Pod exists, wait just a few seconds longer and see what other action Kyverno took. Run the following command to retrieve the Policy Report that Kyverno just created. - -```sh -kubectl get policyreport -o wide -``` - -Notice that there is a single Policy Report with just one result listed under the "PASS" column. This result is due to the Pod we just created having passed the policy. - -```sh -NAME KIND NAME PASS FAIL WARN ERROR SKIP AGE -89044d72-8a1e-4af0-877b-9be727dc3ec4 Pod nginx 1 0 0 0 0 15s -``` - -If you were to describe the above policy report you would see more information about the policy and resource. - -```yaml -Results: - Message: validation rule 'check-team' passed. - Policy: require-labels - Resources: - API Version: v1 - Kind: Pod - Name: nginx - Namespace: default - UID: 07d04dc0-fbb4-479a-b049-a3d63342b354 - Result: pass - Rule: check-team - Scored: true - Source: kyverno - Timestamp: - Nanos: 0 - Seconds: 1683759146 -``` - -Policy reports are helpful in that they are both user- and tool-friendly, based upon an open standard, and separated from the policies which produced them. This separation has the benefit of report access being easy to grant and manage for other users who may not need or have access to Kyverno policies. - -Now that you've experienced validate policies and seen a bit about policy reports, clean up by deleting the policy you created above. - -```sh -kubectl delete clusterpolicy require-labels -``` - -Congratulations, you've just implemented a validation policy in your Kubernetes cluster! For more details on validation policies, see the [validate section](../writing-policies/validate.md). 
- -### Mutation - -Mutation is the ability to change or "mutate" a resource in some way prior to it being admitted into the cluster. A mutate rule is similar to a validate rule in that it selects some type of resource (like Pods or ConfigMaps) and defines what the desired state should look like. - -Add this Kyverno mutate policy to your cluster. This policy will add the label `team` to any new Pod and give it the value of `bravo` but only if a Pod does not already have this label assigned. Kyverno has the ability to perform basic "if-then" logical decisions in a very easy way making policies trivial to write and read. The `+(team)` notation uses a Kyverno anchor to define the behavior Kyverno should take if the label key is not found. - -```yaml -kubectl create -f- << EOF -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - name: add-labels -spec: - rules: - - name: add-team - match: - any: - - resources: - kinds: - - Pod - mutate: - patchStrategicMerge: - metadata: - labels: - +(team): bravo -EOF -``` - -Let's now create a new Pod which does not have the desired label defined. - -```sh -kubectl run redis --image redis -``` - -{{% alert title="Note" color="info" %}} -Kyverno may be configured to exclude system Namespaces like `kube-system` and `kyverno`. Make sure you create the Pod in a user-defined Namespace or the `default` Namespace (for testing only). -{{% /alert %}} - -Once the Pod has been created, get the Pod to see if the `team` label was added. - -```sh -kubectl get pod redis --show-labels -``` - -You should see that the label `team=bravo` has been added by Kyverno. - -Try one more Pod, this time one which does already define the `team` label. - -```sh -kubectl run newredis --image redis -l team=alpha -``` - -Get this Pod back and check once again for labels. - -```sh -kubectl get pod newredis --show-labels -``` - -This time, you should see Kyverno did not add the `team` label with the value defined in the policy since one was already found on the Pod. - -Now that you've experienced mutate policies and seen how logic can be written easily, clean up by deleting the policy you created above. - -```sh -kubectl delete clusterpolicy add-labels -``` - -Congratulations, you've just implemented a mutation policy in your Kubernetes cluster! For more details on mutate policies, see the [mutate section](../writing-policies/mutate.md). - -### Generation - -Kyverno has the ability to generate (i.e., create) a new Kubernetes resource based upon a definition stored in a policy. Like both validate and mutate rules, Kyverno generate rules use similar concepts and structures to express policy. The generation ability is both powerful and flexible with one of its most useful aspects being, in addition to the initial generation, it has the ability to continually synchronize the resources it has generated. Generate rules can be a powerful automation tool and can solve many common challenges faced by Kubernetes operators. Let's look at one such use case in this guide. - -We will use a Kyverno generate policy to generate an image pull secret in a new Namespace. - -First, create this Kubernetes Secret in your cluster which will simulate a real image pull secret. - -```sh -kubectl -n default create secret docker-registry regcred \ - --docker-server=myinternalreg.corp.com \ - --docker-username=john.doe \ - --docker-password=Passw0rd123! \ - --docker-email=john.doe@corp.com -``` - -Next, create the following Kyverno policy. 
The `sync-secrets` policy will match on any newly-created Namespace and will clone the Secret we just created earlier into that new Namespace. - -```yaml -kubectl create -f- << EOF -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - name: sync-secrets -spec: - rules: - - name: sync-image-pull-secret - match: - any: - - resources: - kinds: - - Namespace - generate: - apiVersion: v1 - kind: Secret - name: regcred - namespace: "{{request.object.metadata.name}}" - synchronize: true - clone: - namespace: default - name: regcred -EOF -``` - -Create a new Namespace to test the policy. - -```sh -kubectl create ns mytestns -``` - -Get the Secrets in this new Namespace and see if `regcred` is present. - -```sh -kubectl -n mytestns get secret -``` - -You should see that Kyverno has generated the `regcred` Secret using the source Secret from the `default` Namespace as the template. If you wish, you may also modify the source Secret and watch as Kyverno synchronizes those changes down to wherever it has generated it. - -With a basic understanding of generate policies, clean up by deleting the policy you created above. - -```sh -kubectl delete clusterpolicy sync-secrets -``` - -Congratulations, you've just implemented a generation policy in your Kubernetes cluster! For more details on generate policies, see the [generate section](../writing-policies/generate.md). diff --git a/content/en/docs/introduction/admission-controllers.md b/content/en/docs/introduction/admission-controllers.md index acf06d586..c0914252d 100644 --- a/content/en/docs/introduction/admission-controllers.md +++ b/content/en/docs/introduction/admission-controllers.md @@ -99,7 +99,7 @@ In this example, the API server has been instructed to send any creation request The controller is the other half of the dynamic admission controller story. Something must be listening for the requests sent by the API server and be prepared to respond. This is typically implemented by a controller running in the same cluster as a Pod. This controller, like the API server with the webhook, must have some instruction for how to respond to requests. This instruction is provided to it in the form of a **policy**. A policy is typically another Kubernetes resource, but this time a [Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), which the controller uses to determine that response. Once the controller examines the policy it is prepared to make a decision for resources it receives. -For example, as you may have learned in the [validation quick start section](../introduction/_index.md#validation), a policy such as `require-labels` can be used to instruct the controller how to respond in the case where it receives a matching request. If the Pod has a label named `team` then its creation will be allowed. If it does not, it will be prevented. +For example, as you may have learned in the [validation quick start section](../introduction/quick-start.md#validate-resources), a policy such as `require-labels` can be used to instruct the controller how to respond in the case where it receives a matching request. If the Pod has a label named `team` then its creation will be allowed. If it does not, it will be prevented. Controllers receiving requests from the Kubernetes API server do so over HTTP/REST. The contents of that request are a "packaging" or "wrapping" of the resource, which has been defined via the webhook, in addition to other pertinent information about who or what made the request. 
This package is called an `AdmissionReview`. More details on this packaging format along with an example can be seen [here](../writing-policies/jmespath.md#admissionreview).
diff --git a/content/en/docs/introduction/how-kyverno-works.md b/content/en/docs/introduction/how-kyverno-works.md
new file mode 100644
index 000000000..a5a88fdf4
--- /dev/null
+++ b/content/en/docs/introduction/how-kyverno-works.md
@@ -0,0 +1,31 @@
+---
+title: How Kyverno Works
+linkTitle: How Kyverno Works
+weight: 10
+description: >
+  An overview of how Kyverno works
+---
+
+## Kubernetes Admission Controls
+
+Kyverno runs as a [dynamic admission controller](./admission-controllers.md) in a Kubernetes cluster. Kyverno receives validating and mutating admission webhook HTTP callbacks from the Kubernetes API server and applies matching policies to return results that enforce admission policies or reject requests.
+
+Kyverno policies can match resources using the resource kind, name, label selectors, and much more.
+
+Mutating policies can be written as overlays (similar to [Kustomize](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/#bases-and-overlays)) or as a [RFC 6902 JSON Patch](http://jsonpatch.com/). Validating policies also use an overlay style syntax, with support for pattern matching and conditional (if-then-else) processing.
+
+Policy enforcement is captured using Kubernetes events. For requests that are either allowed or existed prior to introduction of a Kyverno policy, Kyverno creates Policy Reports in the cluster which contain a running list of resources matched by a policy, their status, and more.
+
+The diagram below shows the high-level logical architecture of Kyverno.
+
+Kyverno Architecture
+

+ +The **Webhook** is the server which handles incoming AdmissionReview requests from the Kubernetes API server and sends them to the **Engine** for processing. It is dynamically configured by the **Webhook Controller** which watches the installed policies and modifies the webhooks to request only the resources matched by those policies. The **Cert Renewer** is responsible for watching and renewing the certificates, stored as Kubernetes Secrets, needed by the webhook. The **Background Controller** handles all generate and mutate-existing policies by reconciling UpdateRequests, an intermediary resource. And the **Report Controllers** handle creation and reconciliation of Policy Reports from their intermediary resources, Admission Reports and Background Scan Reports. + +Kyverno also supports high availability. A highly-available installation of Kyverno is one in which the controllers selected for installation are configured to run with multiple replicas. Depending on the controller, the additional replicas may also serve the purpose of increasing the scalability of Kyverno. See the [high availability page](../high-availability/_index.md) for more details on the various Kyverno controllers, their components, and how availability is handled in each one. + + + diff --git a/content/en/docs/introduction/quick-start.md b/content/en/docs/introduction/quick-start.md new file mode 100644 index 000000000..a1fb7ba39 --- /dev/null +++ b/content/en/docs/introduction/quick-start.md @@ -0,0 +1,282 @@ +--- +title: Quick Start Guides +linkTitle: Quick Start Guides +weight: 20 +description: > + An introduction to Kyverno policy and rule types +--- + +This section is intended to provide you with some quick guides on how to get Kyverno up and running and demonstrate a few of Kyverno's seminal features. There are quick start guides which focus on validation, mutation, as well as generation allowing you to select the one (or all) which is most relevant to your use case. + +These guides are intended for proof-of-concept or lab demonstrations only and not recommended as a guide for production. Please see the [installation page](../installation/_index.md) for more complete information on how to install Kyverno in production. + +First, install Kyverno from the latest release manifest. + +```sh +kubectl create -f https://github.com/kyverno/kyverno/releases/download/v1.12.0/install.yaml +``` + +Next, select the quick start guide in which you are interested. Alternatively, start at the top and work your way down. + +## Validate Resources + +In the validation guide, you will see how simple an example Kyverno policy can be which ensures a label called `team` is present on every Pod. Validation is the most common use case for policy and functions as a "yes" or "no" decision making process. Resources which are compliant with the policy are allowed to pass ("yes, this is allowed") and those which are not compliant may not be allowed to pass ("no, this is not allowed"). An additional effect of these validate policies is to produce Policy Reports. A [Policy Report](../policy-reports/_index.md) is a custom Kubernetes resource, produced and managed by Kyverno, which shows the results of policy decisions upon allowed resources in a user-friendly way. + +Add the policy below to your cluster. It contains a single validation rule that requires that all Pods have the `team` label. Kyverno supports different rule types to validate, mutate, generate, cleanup, and verify image configurations. 
The field `validationFailureAction` is set to `Enforce` to block Pods that are non-compliant. Using the default value `Audit` will report violations but not block requests.
+
+```yaml
+kubectl create -f- << EOF
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: require-labels
+spec:
+  validationFailureAction: Enforce
+  rules:
+  - name: check-team
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    validate:
+      message: "label 'team' is required"
+      pattern:
+        metadata:
+          labels:
+            team: "?*"
+EOF
+```
+
+Try creating a Deployment without the required label.
+
+```sh
+kubectl create deployment nginx --image=nginx
+```
+
+You should see an error.
+
+```sh
+error: failed to create deployment: admission webhook "validate.kyverno.svc-fail" denied the request:
+
+resource Deployment/default/nginx was blocked due to the following policies:
+
+require-labels:
+  autogen-check-team: 'validation error: label ''team'' is
+    required. Rule autogen-check-team failed at path /spec/template/metadata/labels/team/'
+```
+
+In addition to the error returned, Kyverno also produces an Event in the same Namespace which contains this information.
+
+{{% alert title="Note" color="info" %}}
+Kyverno may be configured to exclude system Namespaces like `kube-system` and `kyverno`. Make sure you create the Deployment in a user-defined Namespace or the `default` Namespace (for testing only).
+{{% /alert %}}
+
+Note how, although the policy matches on Pods, Kyverno blocked the Deployment you just created. This is because Kyverno intelligently applies policies written exclusively for Pods, using its [rule auto-generation](../writing-policies/autogen.md) feature, to all standard Kubernetes Pod controllers including the Deployment above.
+
+Now, create a Pod with the required label.
+
+```sh
+kubectl run nginx --image nginx --labels team=backend
+```
+
+This Pod configuration is compliant with the policy and is allowed.
+
+Now that the Pod exists, wait just a few seconds longer and see what other action Kyverno took. Run the following command to retrieve the Policy Report that Kyverno just created.
+
+```sh
+kubectl get policyreport -o wide
+```
+
+Notice that there is a single Policy Report with just one result listed under the "PASS" column. This result is due to the Pod we just created having passed the policy.
+
+```sh
+NAME                                   KIND   NAME    PASS   FAIL   WARN   ERROR   SKIP   AGE
+89044d72-8a1e-4af0-877b-9be727dc3ec4   Pod    nginx   1      0      0      0       0      15s
+```
+
+If you were to describe the above policy report you would see more information about the policy and resource.
+
+```yaml
+Results:
+  Message: validation rule 'check-team' passed.
+  Policy: require-labels
+  Resources:
+    API Version: v1
+    Kind: Pod
+    Name: nginx
+    Namespace: default
+    UID: 07d04dc0-fbb4-479a-b049-a3d63342b354
+  Result: pass
+  Rule: check-team
+  Scored: true
+  Source: kyverno
+  Timestamp:
+    Nanos: 0
+    Seconds: 1683759146
+```
+
+Policy reports are helpful in that they are both user- and tool-friendly, based upon an open standard, and separated from the policies which produced them. This separation has the benefit of report access being easy to grant and manage for other users who may not need or have access to Kyverno policies.
+
+Now that you've experienced validate policies and seen a bit about policy reports, clean up by deleting the policy you created above.
+
+```sh
+kubectl delete clusterpolicy require-labels
+```
+
+Congratulations, you've just implemented a validation policy in your Kubernetes cluster! For more details on validation policies, see the [validate section](../writing-policies/validate.md).
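+
+You can also exercise validate policies offline with the Kyverno CLI before applying them to a cluster. A minimal sketch, assuming the policy and a Pod manifest have been saved to the illustrative files `require-labels.yaml` and `pod.yaml`:
+
+```sh
+kyverno apply require-labels.yaml --resource pod.yaml
+```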
+
+## Mutate Resources
+
+Mutation is the ability to change or "mutate" a resource in some way prior to it being admitted into the cluster. A mutate rule is similar to a validate rule in that it selects some type of resource (like Pods or ConfigMaps) and defines what the desired state should look like.
+
+Add this Kyverno mutate policy to your cluster. This policy will add the label `team` to any new Pod and give it the value of `bravo` but only if a Pod does not already have this label assigned. Kyverno has the ability to perform basic "if-then" logical decisions in a very easy way, making policies trivial to write and read. The `+(team)` notation uses a Kyverno anchor to define the behavior Kyverno should take if the label key is not found.
+
+```yaml
+kubectl create -f- << EOF
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: add-labels
+spec:
+  rules:
+  - name: add-team
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    mutate:
+      patchStrategicMerge:
+        metadata:
+          labels:
+            +(team): bravo
+EOF
+```
+
+Let's now create a new Pod which does not have the desired label defined.
+
+```sh
+kubectl run redis --image redis
+```
+
+{{% alert title="Note" color="info" %}}
+Kyverno may be configured to exclude system Namespaces like `kube-system` and `kyverno`. Make sure you create the Pod in a user-defined Namespace or the `default` Namespace (for testing only).
+{{% /alert %}}
+
+Once the Pod has been created, get the Pod to see if the `team` label was added.
+
+```sh
+kubectl get pod redis --show-labels
+```
+
+You should see that the label `team=bravo` has been added by Kyverno.
+
+Try one more Pod, this time one which does already define the `team` label.
+
+```sh
+kubectl run newredis --image redis -l team=alpha
+```
+
+Get this Pod back and check once again for labels.
+
+```sh
+kubectl get pod newredis --show-labels
+```
+
+This time, you should see Kyverno did not add the `team` label with the value defined in the policy since one was already found on the Pod.
+
+Now that you've experienced mutate policies and seen how logic can be written easily, clean up by deleting the policy you created above.
+
+```sh
+kubectl delete clusterpolicy add-labels
+```
+
+Congratulations, you've just implemented a mutation policy in your Kubernetes cluster! For more details on mutate policies, see the [mutate section](../writing-policies/mutate.md).
+
+## Generate Resources
+
+Kyverno has the ability to generate (i.e., create) a new Kubernetes resource based upon a definition stored in a policy. Like both validate and mutate rules, Kyverno generate rules use similar concepts and structures to express policy. The generation ability is both powerful and flexible; one of its most useful aspects is that, in addition to the initial generation, it can continually synchronize the resources it has generated. Generate rules can be a powerful automation tool and can solve many common challenges faced by Kubernetes operators. Let's look at one such use case in this guide.
+
+We will use a Kyverno generate policy to generate an image pull secret in a new Namespace.
+
+First, create this Kubernetes Secret in your cluster which will simulate a real image pull secret.
+
+```sh
+kubectl -n default create secret docker-registry regcred \
+  --docker-server=myinternalreg.corp.com \
+  --docker-username=john.doe \
+  --docker-password=Passw0rd123! 
\
+  --docker-email=john.doe@corp.com
+```
+By default, Kyverno is [configured with minimal permissions](../installation/customization.md#role-based-access-controls) and does not have access to security-sensitive resources like Secrets. You can provide additional permissions using cluster role aggregation. The following role permits the Kyverno background controller to create (clone) Secrets.
+
+```yaml
+kubectl create -f- << EOF
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kyverno:secrets:manage
+  labels:
+    rbac.kyverno.io/aggregate-to-background-controller: "true"
+rules:
+- apiGroups:
+  - ''
+  resources:
+  - secrets
+  verbs:
+  - create
+EOF
+```
+
+Next, create the following Kyverno policy. The `sync-secrets` policy will match on any newly-created Namespace and will clone the Secret we just created earlier into that new Namespace.
+
+```yaml
+kubectl create -f- << EOF
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: sync-secrets
+spec:
+  rules:
+  - name: sync-image-pull-secret
+    match:
+      any:
+      - resources:
+          kinds:
+          - Namespace
+    generate:
+      apiVersion: v1
+      kind: Secret
+      name: regcred
+      namespace: "{{request.object.metadata.name}}"
+      synchronize: false
+      clone:
+        namespace: default
+        name: regcred
+EOF
+```
+
+Create a new Namespace to test the policy.
+
+```sh
+kubectl create ns mytestns
+```
+
+Get the Secrets in this new Namespace and see if `regcred` is present.
+
+```sh
+kubectl -n mytestns get secret
+```
+
+You should see that Kyverno has generated the `regcred` Secret using the source Secret from the `default` Namespace as the template. Because this policy sets `synchronize: false`, later changes to the source Secret will not be propagated to the generated copies; setting `synchronize: true` keeps generated Secrets in sync with the source, but also requires granting the background controller `update` and `delete` permissions on Secrets.
+
+With a basic understanding of generate policies, clean up by deleting the policy you created above.
+
+```sh
+kubectl delete clusterpolicy sync-secrets
+```
+
+Congratulations, you've just implemented a generation policy in your Kubernetes cluster! For more details on generate policies, see the [generate section](../writing-policies/generate.md).
+
+
diff --git a/content/en/docs/kyverno-chainsaw/_index.md b/content/en/docs/kyverno-chainsaw/_index.md
new file mode 100644
index 000000000..e121b2d70
--- /dev/null
+++ b/content/en/docs/kyverno-chainsaw/_index.md
@@ -0,0 +1,18 @@
+---
+title: Kyverno Chainsaw
+description: Declarative e2e testing
+weight: 410
+hide_feedback: true
+---
+
+Kyverno Chainsaw is a sub-project of Kyverno that provides declarative end-to-end testing for Kubernetes controllers.
+
+[Website](https://kyverno.github.io/chainsaw/)
+
+[GitHub](https://github.com/kyverno/chainsaw)
+ diff --git a/content/en/docs/kyverno-cli/usage/apply.md b/content/en/docs/kyverno-cli/usage/apply.md index e75e3c114..71acdb958 100644 --- a/content/en/docs/kyverno-cli/usage/apply.md +++ b/content/en/docs/kyverno-cli/usage/apply.md @@ -811,7 +811,7 @@ With the `apply` command, Kubernetes ValidatingAdmissionPolicies can be applied Policy manifest (check-deployment-replicas.yaml): ```yaml -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingAdmissionPolicy metadata: name: check-deployments-replicas @@ -868,7 +868,7 @@ The below example applies a `ValidatingAdmissionPolicyBinding` along with the po Policy manifest (check-deployment-replicas.yaml): ```yaml -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingAdmissionPolicy metadata: name: "check-deployment-replicas" @@ -887,7 +887,7 @@ spec: validations: - expression: object.spec.replicas <= 5 --- -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingAdmissionPolicyBinding metadata: name: "check-deployment-replicas-binding" diff --git a/content/en/docs/kyverno-cli/usage/test.md b/content/en/docs/kyverno-cli/usage/test.md index 545a91923..7472181e9 100644 --- a/content/en/docs/kyverno-cli/usage/test.md +++ b/content/en/docs/kyverno-cli/usage/test.md @@ -49,7 +49,7 @@ results: resources: # optional, primarily for `validate` rules. - - - patchedResource: # when testing a mutate rule this field is required. + patchedResources: # when testing a mutate rule this field is required. generatedResource: # when testing a generate rule this field is required. cloneSourceResource: # when testing a generate rule that uses `clone` object this field is required. kind: @@ -61,6 +61,7 @@ checks: rule: {} # match results associated with a rule assert: {} # assertion to validate the content of matched elements error: {} # negative assertion to validate the content of matched elements +``` The test declaration consists of the following parts: @@ -70,7 +71,7 @@ The test declaration consists of the following parts: 4. The `variables` element which defines a file in which variables and their values are stored for use in the policy test. Optional depending on policy content. 5. The `userinfo` element which declares admission request data for subjects and roles. Optional depending on policy content. 6. The `results` element which declares the expected results. Depending on the type of rule being tested, this section may vary. -7. The `checks` element which declares the assertions to be evaluted against the results (see [Working with Assertion Trees](../assertion-trees.md)). +7. The `checks` element which declares the assertions to be evaluated against the results (see [Working with Assertion Trees](../assertion-trees.md)). If needing to pass variables, such as those from [external data sources](../../writing-policies/external-data-sources.md) like context variables built from [API calls](../../writing-policies/external-data-sources.md#variables-from-kubernetes-api-server-calls) or others, a `variables.yaml` file can be defined with the same format as accepted with the `apply` command. If a variable needs to contain an array of strings, it must be formatted as JSON encoded. Like with the `apply` command, variables that begin with `request.object` normally do not need to be specified in the variables file as these will be sourced from the resource. 
Policies which trigger based upon `request.operation` equaling `CREATE` do not need a variables file. The CLI will assume a value of `CREATE` if no variable for `request.operation` is defined. @@ -235,8 +236,6 @@ spec: - resources: kinds: - Pod - clusterRoles: - - cluster-admin validate: message: "An image tag is required." pattern: @@ -286,12 +285,14 @@ resources: results: - policy: disallow-latest-tag rule: require-image-tag - resource: myapp-pod + resources: + - myapp-pod kind: Pod result: pass - policy: disallow-latest-tag rule: validate-image-tag - resource: myapp-pod + resources: + - myapp-pod kind: Pod result: pass ``` @@ -299,15 +300,21 @@ results: ```sh $ kyverno test . -Executing disallow_latest_tag... -applying 1 policy to 1 resource... +Loading test ( kyverno-test.yaml ) ... + Loading values/variables ... + Loading policies ... + Loading resources ... + Loading exceptions ... + Applying 1 policy to 1 resource ... + Checking results ... + +│────│─────────────────────│────────────────────│───────────────│────────│────────│ +│ ID │ POLICY │ RULE │ RESOURCE │ RESULT │ REASON │ +│────│─────────────────────│────────────────────│───────────────│────────│────────│ +│ 1 │ disallow-latest-tag │ require-image-tag │ Pod/myapp-pod │ Pass │ Ok │ +│ 2 │ disallow-latest-tag │ validate-image-tag │ Pod/myapp-pod │ Pass │ Ok │ +│────│─────────────────────│────────────────────│───────────────│────────│────────│ -│───│─────────────────────│────────────────────│───────────────────────│────────│ -│ # │ POLICY │ RULE │ RESOURCE │ RESULT │ -│───│─────────────────────│────────────────────│───────────────────────│────────│ -│ 1 │ disallow-latest-tag │ require-image-tag │ default/Pod/myapp-pod │ Pass │ -│ 2 │ disallow-latest-tag │ validate-image-tag │ default/Pod/myapp-pod │ Pass │ -│───│─────────────────────│────────────────────│───────────────────────│────────│ Test Summary: 2 tests passed and 0 tests failed ``` @@ -407,14 +414,16 @@ variables: values.yaml results: - policy: add-default-resources rule: add-default-requests - resource: nginx-demo1 - patchedResource: patchedResource1.yaml + resources: + - nginx-demo1 + patchedResources: patchedResource1.yaml kind: Pod result: pass - policy: add-default-resources rule: add-default-requests - resource: nginx-demo2 - patchedResource: patchedResource2.yaml + resources: + - nginx-demo2 + patchedResources: patchedResource2.yaml kind: Pod result: skip ``` @@ -505,7 +514,8 @@ resources: results: - policy: add-networkpolicy rule: default-deny - resource: hello-world-namespace + resources: + - hello-world-namespace generatedResource: generatedResource.yaml kind: Namespace result: pass @@ -691,7 +701,7 @@ Below is an example of testing a ValidatingAdmissionPolicy against two resources Policy manifest (disallow-host-path.yaml): ```yaml -apiVersion: admissionregistration.k8s.io/v1beta1 +apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingAdmissionPolicy metadata: name: disallow-host-path @@ -774,12 +784,14 @@ resources: - deployments.yaml results: - policy: disallow-host-path - resource: deployment-pass + resources: + - deployment-pass isValidatingAdmissionPolicy: true kind: Deployment result: pass - policy: disallow-host-path - resource: deployment-fail + resources: + - deployment-fail isValidatingAdmissionPolicy: true kind: Deployment result: fail @@ -811,7 +823,7 @@ In the below example, a `ValidatingAdmissionPolicy` and its corresponding `Valid Policy manifest (`check-deployment-replicas.yaml`): ```yaml -apiVersion: admissionregistration.k8s.io/v1beta1 
+apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingAdmissionPolicy
 metadata:
   name: "check-deployment-replicas"
@@ -830,7 +842,7 @@ spec:
   validations:
     - expression: object.spec.replicas <= 2
 ---
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingAdmissionPolicyBinding
 metadata:
   name: "check-deployment-replicas-binding"
@@ -989,7 +1001,7 @@ Variables manifest (`values.yaml`):
 
 ```yaml
 apiVersion: cli.kyverno.io/v1alpha1
-kind: Value
+kind: Values
 metadata:
   name: values
 namespaceSelector:
diff --git a/content/en/docs/kyverno-json/_index.md b/content/en/docs/kyverno-json/_index.md
new file mode 100644
index 000000000..c80950705
--- /dev/null
+++ b/content/en/docs/kyverno-json/_index.md
@@ -0,0 +1,18 @@
+---
+title: Kyverno JSON
+description: Apply Kyverno policies anywhere
+weight: 400
+hide_feedback: true
+---
+
+Kyverno JSON is a sub-project of Kyverno that allows applying Kyverno policies to non-Kubernetes workloads. It works on any JSON payload.
+
+
+
diff --git a/content/en/docs/kyverno-policy-reporter/_index.md b/content/en/docs/kyverno-policy-reporter/_index.md
new file mode 100644
index 000000000..6214960ce
--- /dev/null
+++ b/content/en/docs/kyverno-policy-reporter/_index.md
@@ -0,0 +1,18 @@
+---
+title: Policy Reporter
+description: In-cluster policy report management and visualization
+weight: 420
+hide_feedback: true
+---
+
+Policy Reporter is a sub-project of Kyverno that provides in-cluster management of policy reports with a web-based graphical user interface.
+
+
+
diff --git a/content/en/docs/monitoring/_index.md b/content/en/docs/monitoring/_index.md
index b37819711..ac341ab5e 100644
--- a/content/en/docs/monitoring/_index.md
+++ b/content/en/docs/monitoring/_index.md
@@ -138,11 +138,19 @@ metricsConfig:
 
   # Per Metric configuration, allows disabling metrics, dropping labels and change the bucket boundaries.
   metricsExposure:
+    # Counter disabled
+    kyverno_policy_rule_info_total:
+      enabled: false
+    # Histogram disabled
+    kyverno_admission_review_duration_seconds:
+      enabled: false
+    # Counter with customized dimensions
+    kyverno_admission_requests:
+      disabledLabelDimensions: ["resource_namespace", "resource_kind", "resource_request_operation"]
+    # Histogram with custom boundaries and dimensions
     kyverno_policy_execution_duration_seconds:
       disabledLabelDimensions: ["resource_kind", "resource_namespace", "resource_request_operation"]
       bucketBoundaries: [0.005, 0.01, 0.025]
-    kyverno_admission_review_duration_seconds:
-      enabled: false
 ...
 ```
diff --git a/content/en/docs/monitoring/bonus-grafana-dashboard/index.md b/content/en/docs/monitoring/bonus-grafana-dashboard/index.md
index 675c50e53..d577187be 100644
--- a/content/en/docs/monitoring/bonus-grafana-dashboard/index.md
+++ b/content/en/docs/monitoring/bonus-grafana-dashboard/index.md
@@ -40,3 +40,122 @@ curl -fsS https://raw.githubusercontent.com/kyverno/kyverno/main/charts/kyverno/
 
 ![Dashboard example 1](dashboard-example-1.png)
 ![Dashboard example 2](dashboard-example-2.png)
+
+### Tutorial
+
+Create a local cluster with [kind](https://kind.sigs.k8s.io/).
+
+```sh
+kind create cluster
+```
+
+Add Helm repositories.
+
+```sh
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo add kyverno https://kyverno.github.io/kyverno/
+```
+
+Update Helm repositories.
+
+```sh
+helm repo update
+```
+
+Install Kyverno and the kube-prometheus-stack Helm chart.
+
+
```sh
+helm install kyverno kyverno/kyverno --namespace kyverno --create-namespace
+helm install monitoring prometheus-community/kube-prometheus-stack -n monitoring --create-namespace
+
+kubectl -n monitoring get po -l release=monitoring
+```
+
+Create the ServiceMonitor resource.
+
+Add the following contents to a file named `service-monitor.yaml`.
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app.kubernetes.io/instance: monitoring
+    chart: kube-prometheus-stack-51.2.0
+    heritage: Helm
+    release: monitoring
+  name: service-monitor-kyverno-service
+  namespace: monitoring
+spec:
+  endpoints:
+  - interval: 60s
+    path: /metrics
+    scheme: http
+    targetPort: 8000
+    tlsConfig:
+      insecureSkipVerify: true
+  namespaceSelector:
+    matchNames:
+    - kyverno
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: kyverno
+```
+
+Add the appropriate labels.
+
+```sh
+kubectl label ns kyverno app.kubernetes.io/instance=kyverno
+kubectl label ns kyverno app.kubernetes.io/name=kyverno
+```
+
+Create the ServiceMonitor.
+
+```sh
+kubectl apply -f service-monitor.yaml
+```
+
+Restart Deployments and StatefulSets in the `monitoring` Namespace.
+
+```sh
+kubectl rollout restart deploy,sts -n monitoring
+```
+
+Check services in the `monitoring` Namespace.
+
+```sh
+kubectl get svc -n monitoring
+
+NAME                                    TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
+monitoring-kube-prometheus-prometheus   ClusterIP   10.96.238.189   <none>        9090/TCP,8080/TCP   4h16m
+```
+
+Port forward the `monitoring-kube-prometheus-prometheus` Service to a local port.
+
+```sh
+kubectl port-forward svc/monitoring-kube-prometheus-prometheus 81:9090 -n monitoring
+
+Forwarding from 127.0.0.1:81 -> 9090
+Forwarding from [::1]:81 -> 9090
+```
+
+Similarly, port forward the `monitoring-grafana` Service to another local port.
+
+```sh
+kubectl get svc -n monitoring
+
+NAME                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
+monitoring-grafana   ClusterIP   10.96.188.20   <none>        80/TCP    4h19m
+
+kubectl port-forward svc/monitoring-grafana -n monitoring 82:80
+
+Forwarding from 127.0.0.1:82 -> 3000
+Forwarding from [::1]:82 -> 3000
+```
+
+Go to Prometheus at http://localhost:81 and check Status -> Targets, filtering for "kyverno", to verify that the metrics are being scraped.
+
+Go to Grafana at http://localhost:82 -> Dashboards -> New -> Import -> upload the file produced by the command below -> select Prometheus as the data source -> Import.
+
+```sh
+curl https://raw.githubusercontent.com/kyverno/grafana-dashboard/master/grafana/dashboard.json -o kyverno-dashboard.json
+```
diff --git a/content/en/docs/monitoring/policy-results-info.md b/content/en/docs/monitoring/policy-results-info.md
index 47f6bb545..57f2d485a 100644
--- a/content/en/docs/monitoring/policy-results-info.md
+++ b/content/en/docs/monitoring/policy-results-info.md
@@ -6,7 +6,7 @@ weight: 20
 
 #### Metric Name(s)
 
-* `kyverno_policy_results_total`
+* `kyverno_policy_results`
 
 #### Metric Value
 
@@ -39,10 +39,10 @@ Counter - An only-increasing integer representing the number of results/executio
 #### Useful Queries
 
 * Tracking the total number of rules which failed in the last 24 hours in the "default" namespace grouped by their rule types (validate, mutate, generate):
-`sum(increase(kyverno_policy_results_total{policy_namespace="default", rule_result="fail"}[24h])) by (rule_type)`
+`sum(increase(kyverno_policy_results{policy_namespace="default", rule_result="fail"}[24h])) by (rule_type)`
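+
+For illustration, a minimal sketch of a PrometheusRule that turns the first query above into an alert. The resource name, Namespace, labels, and threshold are hypothetical examples, not part of the Kyverno charts:
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: kyverno-policy-failures   # hypothetical name
+  namespace: monitoring
+spec:
+  groups:
+  - name: kyverno
+    rules:
+    - alert: KyvernoPolicyFailures
+      # Fires when any rule type reported failures in the "default" Namespace over the last 24h
+      expr: sum(increase(kyverno_policy_results{policy_namespace="default", rule_result="fail"}[24h])) by (rule_type) > 0
+      for: 15m
+      labels:
+        severity: warning
+      annotations:
+        summary: Kyverno policy failures detected in the default Namespace
+```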
 * Tracking the per-minute rate of the number of rule executions triggered by incoming Pod requests over the cluster:
-`rate(kyverno_policy_results_total{resource_kind="Pod", rule_execution_cause="admission_request"}[1m])*60`
+`rate(kyverno_policy_results{resource_kind="Pod", rule_execution_cause="admission_request"}[1m])*60`
 
 * Tracking the total number of policies over the cluster running as a part of background scans over the last 2 hours:
-`count(increase(kyverno_policy_results_total{rule_execution_cause="background_scan"}[2h]) by (policy_name))`
+`count(increase(kyverno_policy_results{rule_execution_cause="background_scan"}[2h])) by (policy_name)`
diff --git a/content/en/docs/policy-reports/_index.md b/content/en/docs/policy-reports/_index.md
index c117cab4d..04db46c78 100644
--- a/content/en/docs/policy-reports/_index.md
+++ b/content/en/docs/policy-reports/_index.md
@@ -98,6 +98,66 @@ Entries in a policy report contain a `result` field which can be either `pass`,
 | warn | The annotation `policies.kyverno.io/scored` has been set to `"false"` in the policy converting otherwise `fail` results to `warn`. |
 | error | Variable substitution failed outside of preconditions and elsewhere in the rule (ex., in the pattern). |
 
+### Scenarios for skipped evaluations
+
+A `skip` result signifies that Kyverno decided not to fully evaluate the resource against a specific rule. This is different from a `pass`, where the resource was evaluated and deemed compliant. A `skip` means the rule was essentially bypassed.
+
+Here's a breakdown of common scenarios resulting in a `skip`:
+
+1. **Preconditions Not Met:**
+
+This is the most frequent reason for a skip. If a rule defines preconditions and the overall `any`/`all` expression evaluates to false, the entire rule is skipped. Kyverno won't even attempt to apply the pattern, effectively bypassing the rule. (A minimal example appears after this section.)
+
+2. **Policy Exceptions:**
+
+Kyverno allows you to define exceptions to policies using PolicyException resources. If an exception exists that matches a specific resource and rule, Kyverno will skip the rule for that resource.
+
+3. **Conditional Anchors `()` with Unmet Conditions:**
+
+When using a conditional anchor, the corresponding section is skipped if the condition within the anchor evaluates to false.
+
+4. **Global Anchors `<()` with Unmet Conditions:**
+
+Similar to conditional anchors, if the condition inside a global anchor is false, the entire rule is skipped. The difference is that global anchors apply to the whole rule, not just a specific section.
+
+5. **Anchor Logic Resulting in Skip:**
+
+As explained in the [validate documentation](../writing-policies/validate.md), a combination of anchors and their evaluation results can lead to a skip. Specifically, a conditional anchor might be skipped, but if it's a sibling to another condition that results in a pass or fail, the overall result will reflect that of the sibling, potentially masking the skip.
+
+*Example:* Given the following validation pattern:
+
+```yaml
+spec:
+  =(initContainers):
+  - (name): "!istio-init"
+    =(securityContext):
+      =(runAsUser): ">0"
+  =(containers):
+  - =(securityContext):
+      =(runAsUser): ">0"
+```
+
+The following resource would result in a pass:
+
+```yaml
+spec:
+  initContainers:
+  - name: istio-init
+    securityContext:
+      runAsUser: 0
+  containers:
+  - name: nginx
+    image: nginx
+```
+
+That's because, for the `initContainers` block, the condition isn't met, so it's a skip. But the `containers` block is a pass, so the overall result is a pass.
+
+**Key Points to Remember:**
+
+* A skip result is not a failure; it's a deliberate bypass based on predefined conditions or exceptions.
+* Understanding the distinction between pass and skip is crucial for accurately interpreting policy report data.
+* When troubleshooting a skip, carefully examine preconditions, exceptions, and the logic within your anchors to pinpoint the reason for the bypass.
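+
+For illustration, a minimal sketch of scenario 1. The policy name, labels, and values are hypothetical; the precondition causes any Pod not labeled `env=prod` to be reported as a `skip`:
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: require-owner-label   # hypothetical
+spec:
+  rules:
+  - name: check-owner
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    preconditions:
+      all:
+      # When this condition evaluates to false, the rule is bypassed
+      # and the report entry for this Pod shows a `skip` result.
+      - key: "{{ request.object.metadata.labels.env || '' }}"
+        operator: Equals
+        value: prod
+    validate:
+      message: "The `owner` label is required on production Pods."
+      pattern:
+        metadata:
+          labels:
+            owner: "?*"
+```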
+
 ## Viewing policy report summaries
 
 You can view a summary of the Namespaced policy reports using the following command:
 
diff --git a/content/en/docs/policy-reports/validatingadmissionpolicy-reports.md b/content/en/docs/policy-reports/validatingadmissionpolicy-reports.md
index 2040e3e28..9baf38b28 100644
--- a/content/en/docs/policy-reports/validatingadmissionpolicy-reports.md
+++ b/content/en/docs/policy-reports/validatingadmissionpolicy-reports.md
@@ -14,7 +14,7 @@ To configure Kyverno to generate reports for ValidatingAdmissionPolicies, set th
 
 Create a ValidatingAdmissionPolicy that checks the Deployment replicas and a ValidatingAdmissionPolicyBinding that binds the policy to a namespace whose labels set to `environment: staging`.
 
 ```yaml
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingAdmissionPolicy
 metadata:
   name: "check-deployment-replicas"
@@ -33,7 +33,7 @@ spec:
   validations:
     - expression: object.spec.replicas <= 5
 ---
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingAdmissionPolicyBinding
 metadata:
   name: "check-deployment-replicas-binding"
diff --git a/content/en/docs/releases/_index.md b/content/en/docs/releases/_index.md
index dc4146ab1..30c7b7120 100644
--- a/content/en/docs/releases/_index.md
+++ b/content/en/docs/releases/_index.md
@@ -65,4 +65,4 @@ A minor release will contain a mix of features, enhancements, and bug fixes.
 
 Major features follow the [Kyverno Design Proposal (KDP)](https://github.com/kyverno/KDP/) process.
 
-During the start of a release, there may be many issues assigned to the release milestone. The priorities for the release are discussed in the weekly contributor's meetings. As the release progresses several issues may be moved to the next milestone. Hence, if an issue is important it is important to advocate its priority early in the release cycle.
+During the start of a release, there may be many issues assigned to the release milestone. The priorities for the release are discussed in the weekly maintainers' meetings. As the release progresses, several issues may be moved to the next milestone. Hence, if an issue is important, advocate for its priority early in the release cycle.
diff --git a/content/en/docs/security/_index.md b/content/en/docs/security/_index.md
index 8c26dca4b..1267bd329 100644
--- a/content/en/docs/security/_index.md
+++ b/content/en/docs/security/_index.md
@@ -62,7 +62,7 @@ With each release, the following artifacts are uploaded (where CLI binaries incl
 
 ## Verifying Kyverno Container Images
 
-Kyverno container images are signed using Cosign and the [keyless signing feature](https://docs.sigstore.dev/cosign/verify/). The signatures are stored in a separate repository from the container image they reference located at `ghcr.io/kyverno/signatures`. To verify the container image using Cosign v1.x, follow the steps below.
+Kyverno container images are signed using Cosign and the [keyless signing feature](https://docs.sigstore.dev/cosign/verifying/verify/). The signatures are stored in a separate repository from the container image they reference, located at `ghcr.io/kyverno/signatures`. To verify the container image using Cosign v1.x, follow the steps below.
 
 1. Install [Cosign](https://github.com/sigstore/cosign#installation)
 2.
Configure the Kyverno signature repository: @@ -242,7 +242,7 @@ Kyverno Pods are configured to follow security best practices and conform to the ### RBAC -The Kyverno RBAC configurations are described in the [installation](../installation/customization.md#roles-and-permissions) section. +The Kyverno RBAC configurations are described in the [installation](../installation/customization.md#role-based-access-controls) section. Use the following command to view all Kyverno roles: @@ -352,7 +352,7 @@ The sections below list each threat, mitigation, and provide Kyverno specific de * [Mitigation ID 1 - RBAC rights are strictly controlled](https://github.com/kubernetes/sig-security/blob/main/sig-security-docs/papers/admission-control/kubernetes-admission-control-threat-model.md#mitigation-id-1---rbac-rights-are-strictly-controlled) - Kyverno RBAC configurations are described in the [installation section](../installation/customization.md#roles-and-permissions). The `kyverno:admission-controller` role is used by Kyverno to configure webhooks. It is important to limit Kyverno to the required permissions and audit changes in the RBAC roles and role bindings. + Kyverno RBAC configurations are described in the [installation section](../installation/customization.md#role-based-access-controls). The `kyverno:admission-controller` role is used by Kyverno to configure webhooks. It is important to limit Kyverno to the required permissions and audit changes in the RBAC roles and role bindings. ### Threat ID 5 - Attacker gets access to valid credentials for the webhook @@ -420,7 +420,7 @@ The sections below list each threat, mitigation, and provide Kyverno specific de * [Mitigation ID 1 - RBAC rights are strictly controlled](https://github.com/kubernetes/sig-security/blob/main/sig-security-docs/papers/admission-control/kubernetes-admission-control-threat-model.md#mitigation-id-1---rbac-rights-are-strictly-controlled) - Kyverno RBAC configurations are described in the [configuration section](../installation/customization.md#roles-and-permissions). The `kyverno:admission-controller` role is used by Kyverno to configure webhooks. It is important to limit Kyverno to the required permissions and audit changes in the RBAC roles and role bindings. + Kyverno RBAC configurations are described in the [configuration section](../installation/customization.md#role-based-access-controls). The `kyverno:admission-controller` role is used by Kyverno to configure webhooks. It is important to limit Kyverno to the required permissions and audit changes in the RBAC roles and role bindings. Kyverno excludes certain critical system Namespaces by default including the Kyverno Namespace itself. These exclusions can be managed and configured via the [ConfigMap](../installation/customization.md#configmap-keys). diff --git a/content/en/docs/troubleshooting/_index.md b/content/en/docs/troubleshooting/_index.md index f850dab0f..c5e7c6285 100644 --- a/content/en/docs/troubleshooting/_index.md +++ b/content/en/docs/troubleshooting/_index.md @@ -104,7 +104,7 @@ You can also follow the steps on the [Kyverno wiki](https://github.com/kyverno/k **Symptom**: I'm using AKS and Kyverno is using too much memory or CPU or produces many audit logs -**Solution**: On AKS the Kyverno webhooks will be mutated by the AKS [Admissions Enforcer](https://learn.microsoft.com/en-us/azure/aks/faq#can-admission-controller-webhooks-impact-kube-system-and-internal-aks-namespaces) plugin, that can lead to an endless update loop. 
To prevent that behavior, set the annotation `"admissions.enforcer/disabled": true` to all Kyverno webhooks. When installing via Helm, the annotation can be added with `config.webhookAnnotations`.
+**Solution**: On AKS the Kyverno webhooks will be mutated by the AKS [Admissions Enforcer](https://learn.microsoft.com/en-us/azure/aks/faq#can-admission-controller-webhooks-impact-kube-system-and-internal-aks-namespaces) plugin, which can lead to an endless update loop. To prevent that behavior, set the annotation `"admissions.enforcer/disabled": true` on all Kyverno webhooks. When installing via Helm, the annotation can be added with `config.webhookAnnotations`. As of Kyverno 1.12, this configuration is enabled by default.
 
 ## Kyverno is slow to respond
 
@@ -134,7 +134,7 @@ You can also follow the steps on the [Kyverno wiki](https://github.com/kyverno/k
 
 **Symptom**: I'm using GKE and after installing Kyverno, my cluster is either broken or I'm seeing timeouts and other issues.
 
-**Solution**: Private GKE clusters do not allow certain communications from the control planes to the workers, which Kyverno requires to receive webhooks from the API server. In order to resolve this issue, create a firewall rule which allows the control plane to speak to workers on the Kyverno TCP port which, by default at this time, is 9443.
+**Solution**: Private GKE clusters do not allow certain communications from the control planes to the workers, which Kyverno requires to receive webhooks from the API server. In order to resolve this issue, create a firewall rule which allows the control plane to speak to workers on the Kyverno TCP port which, by default at this time, is 9443. For more details, see the [GKE documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).
 
 ## Kyverno fails on EKS
 
diff --git a/content/en/docs/writing-policies/cleanup.md b/content/en/docs/writing-policies/cleanup.md
index 68a9ed6b5..8fba6ad83 100644
--- a/content/en/docs/writing-policies/cleanup.md
+++ b/content/en/docs/writing-policies/cleanup.md
@@ -5,10 +5,6 @@ description: >
 weight: 70
 ---
 
-{{% alert title="Warning" color="warning" %}}
-Cleanup policies are a **beta** feature. It is not ready for production usage and there may be breaking changes. Normal semantic versioning and compatibility rules will not apply.
-{{% /alert %}}
-
 Kyverno has the ability to cleanup (i.e., delete) existing resources in a cluster in two different ways. The first way is via a declarative policy definition in either a `CleanupPolicy` or `ClusterCleanupPolicy`. See the section on [cleanup policies](#cleanup-policy) below for more details. The second way is via a reserved time-to-live (TTL) label added to a resource. See the [cleanup label](#cleanup-label) section for further details.
 
 ## Cleanup Policy
 
 Similar to other policies which can validate, mutate, generate, or verify images in resources, Kyverno can cleanup resources by defining a new policy type called a `CleanupPolicy`. Cleanup policies come in both cluster-scoped and Namespaced flavors; a `ClusterCleanupPolicy` being cluster scoped and a `CleanupPolicy` being Namespaced. A cleanup policy uses the familiar `match`/`exclude` block to select and exclude resources which are subjected to the cleanup process.
A `conditions{}` block (optional) uses common expressions similar to those found in [preconditions](preconditions.md) and [deny rules](validate.md#deny-rules) to query the contents of the selected resources in order to refine the selection process. [Context variables](external-data-sources.md) (optional) can be used to fetch data from other resources to factor into the cleanup process. And, lastly, a `schedule` field defines, in cron format, when the rule should run. {{% alert title="Note" color="info" %}} -Since cleanup policies always operate against existing resources in a cluster, policies created with `subjects`, `Roles`, or `ClusterRoles` in the `match`/`exclude` block are not allowed since this information is only known at admission time. +Since cleanup policies always operate against existing resources in a cluster, policies created with `subjects`, `Roles`, or `ClusterRoles` in the `match`/`exclude` block are not allowed since this information is only known at admission time. Additionally, `operations[]`, while permitted, are ignored as the only trigger is schedule based. {{% /alert %}} An example ClusterCleanupPolicy is shown below. This cleanup policy removes Deployments which have the label `canremove: "true"` if they have less than two replicas on a schedule of every 5 minutes. diff --git a/content/en/docs/writing-policies/exceptions.md b/content/en/docs/writing-policies/exceptions.md index 89b019cdc..8ec57660d 100644 --- a/content/en/docs/writing-policies/exceptions.md +++ b/content/en/docs/writing-policies/exceptions.md @@ -5,10 +5,6 @@ description: > weight: 80 --- -{{% alert title="Warning" color="warning" %}} -Policy exceptions are a **beta** feature. Normal semantic versioning and compatibility rules will not apply. -{{% /alert %}} - Although Kyverno policies contain multiple methods to provide fine-grained control as to which resources they act upon in the form of [`match`/`exclude` blocks](match-exclude.md#match-statements), [preconditions](preconditions.md) at multiple hierarchies, [anchors](validate.md#anchors), and more, all these mechanisms have in common that the resources which they are intended to exclude must occur in the same rule definition. This may be limiting in situations where policies may not be directly editable, or doing so imposes an operational burden. For example, in organizations where multiple teams must interact with the same cluster, a team responsible for policy authoring and administration may not be the same team responsible for submission of resources. In these cases, it can be advantageous to decouple the policy definition from certain exclusions. Additionally, there are often times where an organization or team must allow certain exceptions which would violate otherwise valid rules but on a one-time basis if the risks are known and acceptable. diff --git a/content/en/docs/writing-policies/external-data-sources.md b/content/en/docs/writing-policies/external-data-sources.md index 43a71f72c..3d64e9153 100644 --- a/content/en/docs/writing-policies/external-data-sources.md +++ b/content/en/docs/writing-policies/external-data-sources.md @@ -714,6 +714,20 @@ The data returned by GlobalContextEntries may vary depending on whether it is a GlobalContextEntries must be in a healthy state (i.e., there is a response received from the remote endpoint) in order for the policies which reference them to be considered healthy. 
A GlobalContextEntry which is in a `not ready` state will cause any/all referenced policies to also be in a similar state and therefore will not be processed. Creation of a policy referencing a GlobalContextEntry which either does not exist or is not ready will print a warning notifying users.
 {{% /alert %}}
 
+#### Default values for API calls
+
+In the case where the API server returns an error, `default` can be used to provide a fallback value for the API call context entry. The following example shows how to add a default value to context entries:
+
+```yaml
+...
+  context:
+    - name: currentnamespace
+      apiCall:
+        urlPath: "/api/v1/namespaces/{{ request.namespace }}"
+        jmesPath: metadata.name
+        default: default
+...
+```
+
 ## Variables from Image Registries
 
 A context can also be used to store metadata on an OCI image by using the `imageRegistry` context type. By using this external data source, a Kyverno policy can make decisions based on details of the container image that occurs as part of an incoming resource.
@@ -736,6 +750,7 @@ the output `imageData` variable will have a structure which looks like the follo
     "registry": "ghcr.io",
     "repository": "kyverno/kyverno",
     "identifier": "latest",
+    "imageIndex": imageIndex,
     "manifest": manifest,
     "configData": config,
 }
@@ -755,7 +770,7 @@ The `imageData` variable represents a "normalized" view of an image after any re
 ```
 {{% /alert %}}
 
-The `manifest` and `config` keys contain the output from `crane manifest <image>` and `crane config <image>` respectively.
+The `imageIndex`, `manifest`, and `config` keys contain the output from `crane manifest <image>` (the image index, when one exists, and the platform-specific manifest) and `crane config <image>`, respectively.
 
 For example, one could inspect the labels, entrypoint, volumes, history, layers, etc of a given image. Using the [crane](https://github.com/google/go-containerregistry/tree/main/cmd/crane) tool, show the config of the `ghcr.io/kyverno/kyverno:latest` image:
 
diff --git a/content/en/docs/writing-policies/generate.md b/content/en/docs/writing-policies/generate.md
index 641f26536..ee29ade0b 100644
--- a/content/en/docs/writing-policies/generate.md
+++ b/content/en/docs/writing-policies/generate.md
@@ -116,7 +116,7 @@ spec:
             apiVersion: networking.k8s.io/v1
             name: deny-all-traffic
             namespace: "{{request.object.metadata.name}}"
-            data:
+            data:
               spec:
                 # select all pods in the namespace
                 podSelector: {}
 
 For other examples of generate rules, see the [policy library](/policies/?policytypes=generate).
 
 {{% alert title="Note" color="info" %}}
-The field `spec.generateExistingOnPolicyUpdate` is no longer required for "classic" generate rules, is deprecated, and will be removed in an upcoming version.
+The field `spec.generateExisting` is no longer required for "classic" generate rules, is deprecated, and will be removed in an upcoming version.
 {{% /alert %}}
 
 ## Clone Source
@@ -223,6 +223,183 @@ spec:
       matchLabels:
         allowedToBeCloned: "true"
 ```
+
+## foreach
+
+The `foreach` declaration allows generation of multiple target resources from sub-elements in resource declarations. Each `foreach` entry must contain a `list` attribute, written as a JMESPath expression without braces, that defines the sub-elements it processes. For example, creating NetworkPolicies for a list of Namespaces which is stored in a label:
+
+```
+list: request.object.metadata.labels.namespaces | split(@, ',')
+```
+
+When a `foreach` is processed, the Kyverno engine will evaluate `list` as a JMESPath expression to retrieve zero or more sub-elements for further processing. The value of the `list` field may also resolve to a simple array of strings, for example as defined in a context variable (see the sketch below). The value of the `list` field should not be enclosed in braces even though it is a JMESPath expression.
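+
+For illustration, a sketch of a rule fragment (hypothetical names) whose `list` resolves to an array defined in a rule-level context variable rather than a field of the trigger:
+
+```yaml
+rules:
+- name: generate-per-namespace   # hypothetical
+  match:
+    any:
+    - resources:
+        kinds:
+        - ConfigMap
+  context:
+  # rule-level context entry producing a simple array of strings
+  - name: nslist
+    variable:
+      jmesPath: request.object.data.namespaces | split(@, ',')
+  generate:
+    generateExisting: false
+    synchronize: true
+    foreach:
+      # a bare variable name is itself a valid JMESPath expression
+      - list: nslist
+        ...
+```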
+
+A variable `element` is added to the processing context on each iteration. This allows referencing data in the element using `element.<name>`, where name is the attribute name. For example, using the list `request.object.spec.containers` when the `request.object` is a Pod allows referencing the container image as `element.image` within a `foreach`. An additional variable called `elementIndex` is made available which allows the current index number to be referenced in a loop.
+
+The following child declarations are permitted in a `foreach`:
+
+- [Data Source](#data-source)
+- [Clone Source](#clone-source)
+
+In addition, each `foreach` declaration can contain the following declarations:
+
+- [Context](external-data-sources.md): to add additional external data only available per loop iteration.
+- [Preconditions](preconditions.md): to control when a loop iteration is skipped.
+
+Here is a complete example of a data source type of `foreach` declaration. It creates a NetworkPolicy in each of a list of existing Namespaces, which is stored as a comma-separated string in a ConfigMap.
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: foreach-generate-data
+spec:
+  rules:
+    - match:
+        any:
+        - resources:
+            kinds:
+            - ConfigMap
+      name: k-kafka-address
+      generate:
+        generateExisting: false
+        synchronize: true
+        orphanDownstreamOnPolicyDelete: false
+        foreach:
+          - list: request.object.data.namespaces | split(@, ',')
+            context:
+              - name: ns
+                variable:
+                  jmesPath: element
+            preconditions:
+              any:
+              - key: '{{ ns }}'
+                operator: AnyIn
+                value:
+                - foreach-ns-1
+            apiVersion: networking.k8s.io/v1
+            kind: NetworkPolicy
+            name: my-networkpolicy-{{element}}-{{ elementIndex }}
+            namespace: '{{ element }}'
+            data:
+              metadata:
+                labels:
+                  request.namespace: '{{ request.object.metadata.name }}'
+                  element: '{{ element }}'
+                  elementIndex: '{{ elementIndex }}'
+              spec:
+                podSelector: {}
+                policyTypes:
+                - Ingress
+                - Egress
+```
+
+For a complete example of a clone source type of `foreach` declaration, which clones a source Secret into a list of matching existing Namespaces stored as a comma-separated string in a ConfigMap, see below.
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: foreach-clone
+spec:
+  rules:
+    - match:
+        any:
+        - resources:
+            kinds:
+            - ConfigMap
+      name: k-kafka-address
+      context:
+        - name: configmapns
+          variable:
+            jmesPath: request.object.metadata.namespace
+      preconditions:
+        any:
+        - key: '{{configmapns}}'
+          operator: Equals
+          value: 'default'
+      generate:
+        generateExisting: false
+        synchronize: true
+        foreach:
+          - list: request.object.data.namespaces | split(@, ',')
+            context:
+              - name: ns
+                variable:
+                  jmesPath: element
+            preconditions:
+              any:
+              - key: '{{ ns }}'
+                operator: AnyIn
+                value:
+                - foreach-ns-1
+            apiVersion: v1
+            kind: Secret
+            name: cloned-secret-{{ elementIndex }}-{{ ns }}
+            namespace: '{{ ns }}'
+            clone:
+              namespace: default
+              name: source-secret
+```
+
+See the triggering ConfigMap below; its `data` contains a `namespaces` field that defines multiple Namespaces.
+```yaml +kind: ConfigMap +apiVersion: v1 +metadata: + name: default-deny + namespace: default +data: + namespaces: foreach-ns-1,foreach-ns-2 +``` + +Similarly as above, here is an example of clone list type of `foreach` declaration that clones a list of secrets based on the label selector. +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: foreach-cpol-clone-list-sync-delete-source +spec: + rules: + - match: + any: + - resources: + kinds: + - ConfigMap + name: k-kafka-address + context: + - name: configmapns + variable: + jmesPath: request.object.metadata.namespace + preconditions: + any: + - key: '{{configmapns}}' + operator: Equals + value: '{{request.object.metadata.namespace}}' + generate: + generateExisting: false + synchronize: true + foreach: + - list: request.object.data.namespaces | split(@, ',') + context: + - name: ns + variable: + jmesPath: element + preconditions: + any: + - key: '{{ ns }}' + operator: AnyIn + value: + - foreach-cpol-clone-list-sync-delete-source-target-ns-1 + namespace: '{{ ns }}' + cloneList: + kinds: + - v1/Secret + namespace: foreach-cpol-clone-list-sync-delete-source-existing-ns + selector: + matchLabels: + allowedToBeCloned: "true" +``` ## Generating Bindings @@ -267,7 +444,7 @@ spec: apiVersion: rbac.authorization.k8s.io/v1 name: steven-rolebinding namespace: "{{request.object.metadata.name}}" - data: + data: subjects: - kind: User name: steven @@ -284,7 +461,13 @@ When a new Namespace is created, Kyverno will generate a new RoleBinding called In some cases, a triggering (source) resource and generated (downstream) resource need to share the same life cycle. That is, when the triggering resource is deleted so too should the generated resource. This is valuable because some resources are only needed in the presence of another, for example a Service of type `LoadBalancer` necessitating the need for a specific network policy in some CNI plug-ins. -When a generate rule has synchronization enabled (`synchronize: true`), deletion of the triggering resource will automatically cause deletion of the downstream (generated) resource. In addition to deletion, if the triggering resource is altered in a way such that it no longer matches the definition in the rule, that too will cause removal of the downstream resource. In cases where synchronization needs to be disabled, if the trigger and downstream are both Namespaced resources and in the same Namespace, the ownerReference technique can be used. +When a generate rule has synchronization enabled (`synchronize: true`), deletion of the triggering resource will automatically cause deletion of the downstream (generated) resource. In addition to deletion, if the triggering resource is altered in a way such that it no longer matches the definition in the rule, that too will cause removal of the downstream resource. In cases where synchronization needs to be disabled, if the trigger and downstream are both Namespaced resources and in the same Namespace, the ownerReference technique can be used. + +For a `generate.foreach` type of declaration, Kyverno does not prevent modifications to the rule definition. When the `synchronize` option is enabled for such a rule, Kyverno will not synchronize changes to existing target resources when updates are made to the target resource specification. 
For instance, if a `generate.foreach` declaration initially creates a NetworkPolicy named `staging/networkpolicy-default` and is subsequently modified to create a new NetworkPolicy named `staging/networkpolicy-new`, any further changes will not be applied to the existing `staging/networkpolicy-default` NetworkPolicy resource.
+
+{{% alert title="Note" color="info" %}}
+Synchronization involving changes to trigger resources is confined to the `match` block and does not take preconditions into consideration.
+{{% /alert %}}
 
 It is possible to set the `ownerReferences` field in the generated resource which, when pointed to the trigger, will cause deletion of the trigger to instruct Kubernetes to garbage collect the downstream. With the below example, when the generated ConfigMap specifies the `metadata.ownerReferences[]` object and defines the following fields including `uid`, which references the triggering Service resource, an owner-dependent relationship is formed. Later, if the Service is deleted, the ConfigMap will be as well. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/#owner-references-in-object-specifications) for more details including an important caveat around the scoping of these references. Specifically, Namespaced resources cannot be the owners of cluster-scoped resources, and cross-namespace references are also disallowed.
 
diff --git a/content/en/docs/writing-policies/jmespath.md b/content/en/docs/writing-policies/jmespath.md
index 185014ec6..2ec5d2744 100644
--- a/content/en/docs/writing-policies/jmespath.md
+++ b/content/en/docs/writing-policies/jmespath.md
@@ -2693,7 +2693,7 @@ This filter can be helpful when needing to produce output for a field which only
 |--------------------|---------|
 | String             | Boolean |
 
-**Example:** This policy sets the `hostIPC` field of a Pod spec appropriately based on the value of a label (a string). Note that use of this filter may require setting the policy option `spec.schemaValidation` to `false` since there may be a type checking mismatch.
+**Example:** This policy sets the `hostIPC` field of a Pod spec appropriately based on the value of a label (a string).
 
 ```yaml
 apiVersion: kyverno.io/v1
@@ -2701,7 +2701,6 @@ kind: ClusterPolicy
 metadata:
   name: to-boolean-demo
 spec:
-  schemaValidation: false
   rules:
   - name: canuseIPC
     match:
diff --git a/content/en/docs/writing-policies/match-exclude.md b/content/en/docs/writing-policies/match-exclude.md
index 75a6d6bf8..d365e0547 100644
--- a/content/en/docs/writing-policies/match-exclude.md
+++ b/content/en/docs/writing-policies/match-exclude.md
@@ -1,13 +1,11 @@
 ---
 title: Selecting Resources
 description: >
-  Identifying and filtering resources for policy evaluation.
+  Identifying and filtering resources for rule evaluation.
 weight: 20
 ---
 
-The `match` and `exclude` filters control which resources policies are applied to.
-
-The `match` and `exclude` clauses have the same structure and can each contain **only one** of the two elements:
+The `match` and `exclude` filters control the scope to which rules are applied.
They have the same structure and can each contain **only one** of the two elements: * `any`: specify [resource filters](#resource-filters) on which Kyverno will perform the logical **OR** operation while choosing resources * `all`: specify [resource filters](#resource-filters) on which Kyverno will perform the logical **AND** operation while choosing resources @@ -35,7 +33,7 @@ Supported formats: * `Version/Kind` * `Kind` -To resolve kind naming conflicts, specify the API group and version. For example, the Kubernetes API, Calico, and Antrea all register a Kind with the name NetworkPolicy. These can be distinguished as: +To resolve kind naming conflicts, specify the API group and version. For example, the Kubernetes API, Calico, and Antrea all register a Custom Resource with the name NetworkPolicy. These can be distinguished as: * `networking.k8s.io/v1/NetworkPolicy` * `crd.antrea.io/v1alpha1/NetworkPolicy` @@ -65,25 +63,22 @@ In every rule, there must be a single `match` statement to function as the filte In this snippet, the `match` statement matches on all resources that **EITHER** have the kind Service with name "staging" **OR** have the kind Service and are being created in the "prod" Namespace. ```yaml -spec: - rules: - - name: no-LoadBalancer - match: - any: - - resources: - kinds: - - Service - names: - - staging - operations: - - CREATE - - resources: - kinds: - - Service - namespaces: - - prod - operations: - - CREATE +match: + any: + - resources: + kinds: + - Service + names: + - staging + operations: + - CREATE + - resources: + kinds: + - Service + namespaces: + - prod + operations: + - CREATE ``` The `operations[]` list is optional but recommended. When `operations[]` is absent, the default behavior is to match on `CREATE` and `UPDATE` requests. @@ -91,59 +86,46 @@ The `operations[]` list is optional but recommended. When `operations[]` is abse By combining multiple elements in the `match` statement, you can be more selective as to which resources you wish to process. Additionally, wildcards are supported for even greater control. For example, by adding the `resources.names` field, the previous `match` statement can further filter out Services that begin with the text "prod-" **OR** have the name "staging". `resources.names` takes in a list of names and would match all resources which have either of those names. ```yaml -spec: - rules: - - name: no-LoadBalancer - match: - any: - - resources: - names: - - "prod-*" - - "staging" - kinds: - - Service - operations: - - CREATE - - resources: - kinds: - - Service - operations: - - CREATE - subjects: - - kind: User - name: dave +match: + any: + - resources: + names: + - "prod-*" + - "staging" + kinds: + - Service + operations: + - CREATE + - resources: + kinds: + - Service + operations: + - CREATE + subjects: + - kind: User + name: dave ``` `match.any[0]` will now match on only Services that begin with the name "prod-" **OR** have the name "staging" and not those which begin with "dev-" or any other prefix. `match.any[1]` will match all Services being created by the `dave` user regardless of the name of the Service. And since these two are specified under the `any` key, the entire rule will act on all Services with names `prod-*` or `staging` **OR** on all services being created by the `dave` user. In both `match` and `exclude` statements, [wildcards](validate.md#wildcards) are supported to make selection more flexible. 
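+
+For illustration, a minimal sketch (hypothetical names) of wildcards in a `match` statement; `?` matches a single character and `*` matches zero or more:
+
+```yaml
+match:
+  any:
+  - resources:
+      kinds:
+      - Service
+      names:
+      # matches prod-api, prod-web, etc.
+      - "prod-*"
+      namespaces:
+      # matches any Namespace ending in -staging
+      - "*-staging"
+```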
-{{% alert title="Note" color="info" %}} -Kyverno also supports `resources.name` which allows you to pass in only a single name rather than a list, but `resources.name` is being deprecated in favor of `resources.names` and will be removed in a future release. -{{% /alert %}} - In this snippet, the `match` statement matches only resources that have the group `networking.k8s.io`, version `v1` and kind `NetworkPolicy`. By adding Group,Version,Kind in the match statement, you can be more selective as to which resources you wish to process. ```yaml -spec: - rules: - - name: no-LoadBalancer - match: - any: - - resources: - kinds: - - networking.k8s.io/v1/NetworkPolicy +match: + any: + - resources: + kinds: + - networking.k8s.io/v1/NetworkPolicy ``` By specifying the `kind` in `version/kind` format, only specific versions of the resource kind will be matched. ```yaml -spec: - rules: - - name: no-LoadBalancer - match: - any: - - resources: - kinds: - - v1/NetworkPolicy +match: + any: + - resources: + kinds: + - v1/NetworkPolicy ``` Wildcards are supported in the `kinds` field allowing you to match on every resource type in the cluster. Selector labels support wildcards `(* or ?)` for keys as well as values in the following paths. @@ -192,6 +174,38 @@ spec: Keep in mind that when matching on all kinds (`*`) the policy you write must be applicable across all of them. Typical uses for this type of wildcard matching are elements within the `metadata` object. This type of matching should be used sparingly and carefully as it will instruct the API server to send every eligible resource type to Kyverno, greatly increasing the amount of processing performed by Kyverno. {{% /alert %}} +Matches for Namespaced resources can also be selected using Namespace labels by using the `namespaceSelector` field. This field allows selection in two granular ways: by using `matchLabels` or `matchExpressions`. When using `matchLabels`, a map of Namespace labels can be used to specify the match. For more advanced selection logic, use the `matchExpressions` element. + +In the below example, Kyverno will only consider matching Pods as those in a Namespace which contains the label `organization: engineering`. + +```yaml +match: + any: + - resources: + kinds: + - Pod + namespaceSelector: + matchLabels: + organization: engineering +``` + +And in this example, Kyverno will select Pods in a Namespace where the label key `namespacekind` does not equal the value `platform` or `ci`. + +```yaml +match: + any: + - resources: + kinds: + - Pod + namespaceSelector: + matchExpressions: + - key: namespacekind + operator: NotIn + values: + - platform + - ci +``` + Here are some other examples of `match` statements. ### Match a Deployment or StatefulSet with a specific label @@ -203,23 +217,20 @@ Condition checks inside the `resources` block follow the logic "**AND across typ In the below snippet, `kinds` and `selector` are peer/sibling elements, and so they are **AND**ed together. 
```yaml -spec: - rules: - - name: match-critical-app - match: - any: - # AND across kinds and namespaceSelector - - resources: - # OR inside list of kinds - kinds: - - Deployment - - StatefulSet - operations: - - CREATE - - UPDATE - selector: - matchLabels: - app: critical +match: + any: + # AND across kinds and namespaceSelector + - resources: + # OR inside list of kinds + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + selector: + matchLabels: + app: critical ``` This pattern can be leveraged to produce very fine-grained control over the selection of resources, for example the snippet as shown below which combines `match` elements that include `resources`, `subjects`, `roles`, and `clusterRoles`. @@ -257,10 +268,10 @@ spec: - test # Optional label selectors. Values support wildcards (* and ?) selector: - matchLabels: - app: mongodb - matchExpressions: - - {key: tier, operator: In, values: [database]} + matchLabels: + app: mongodb + matchExpressions: + - {key: tier, operator: In, values: [database]} # Optional users or service accounts to be matched subjects: - kind: User @@ -305,6 +316,8 @@ spec: ## Combining match and exclude +In cases where a subset of the resources selected in a `match` block need to be omitted from processing, you may optionally use an `exclude` block. For example, you wish to only process Pods which do not have the label `env=prod`. An `exclude` block can be used to select those with the label `env=prod`. An `exclude` block must therefore be a subset of the `match` block. + All `match` and `exclude` conditions must be satisfied for a resource to be selected for the policy rule. In other words, the `match` and `exclude` conditions are evaluated using a logical **AND** operation. Elements in the `exclude` block follow the same specifications as those in the `match` block. ### Exclude `cluster-admin` ClusterRole @@ -329,13 +342,9 @@ spec: - cluster-admin ``` -### Exclude `kube-system` namespace - -This rule matches all Pods except those in the `kube-system` Namespace. +### Exclude `prod-alpha` Namespace -{{% alert title="Note" color="info" %}} -The `kube-system` Namespace is excluded from processing in a default installation of Kyverno via the [resourceFilter](../installation/customization.md#resource-filters). The example shown below is for illustration purposes and may not be strictly necessary. -{{% /alert %}} +This rule matches all Pods except those in the `prod-alpha` Namespace. ```yaml spec: @@ -353,7 +362,7 @@ spec: any: - resources: namespaces: - - kube-system + - prod-alpha ``` ### Match a label and exclude users and roles diff --git a/content/en/docs/writing-policies/mutate.md b/content/en/docs/writing-policies/mutate.md index 288ad20fe..d242745d4 100644 --- a/content/en/docs/writing-policies/mutate.md +++ b/content/en/docs/writing-policies/mutate.md @@ -422,7 +422,7 @@ In addition to standard mutations, Kyverno also supports mutation on existing re 1. Mutation for existing resources is an asynchronous process. This means there will be a variable amount of delay between the period where the trigger was observed and the existing resource was mutated. 2. Custom permissions are almost always required. Because these mutations occur on existing resources and not an AdmissionReview (which does not yet exist), Kyverno may need additional permissions which it does not have by default. 
See the section on [customizing permissions](../installation/customization.md#customizing-permissions) on how to grant additional permission to the Kyverno background controller's ServiceAccount to determine, prior to installing mutate existing rules, if additional permissions are required. Kyverno will perform these permissions checks at the time a mutate existing policy is installed. Missing or incorrect permissions will result in failure to create the policy. -To define such a policy, trigger resources need to be specified in the `match` block. The target resources--resources targeted for mutation--are specified in each mutate rule under `mutate.targets`. Note that all target resources within a single rule must share the same definition schema. For example, a mutate existing rule fails if this rule mutates both `Pod` and `Deployment` as they do not share the same OpenAPI V3 schema (except `metadata`). +To define a "mutate existing" policy, trigger resources need to be specified in the `match` block. The target resources--resources targeted for mutation--are specified in each mutate rule under `mutate.targets`. Mutate existing rules differ from standard mutate rules when these targets are defined. Note that all target resources within a single rule must share the same definition schema. For example, a mutate existing rule fails if this rule mutates both `Pod` and `Deployment` as they do not share the same OpenAPI V3 schema (except `metadata`). Because the `match` and `mutate.targets[]` stanzas have two separate scopes, when wishing to match on and mutate the same kind of resources any exclusionary conditions must be placed in the correct scope. Match filter criteria do not implicitly function as the input filter for target selection. For example, wishing to match on and mutate existing Ingress resources which have the annotation `corp.org/facing=internal` should at least have the annotation as a selection criteria in the targets section and may use either anchors or preconditions as described further below. Placing this annotation in the `match` clause will only result in Kyverno triggering upon those resources and not necessarily mutating them. @@ -462,8 +462,7 @@ spec: ``` By default, the above policy will not be applied when it is installed. This behavior can be configured via `mutateExistingOnPolicyUpdate` attribute. If you set `mutateExistingOnPolicyUpdate` to `true`, Kyverno will mutate the existing secret on policy CREATE and UPDATE AdmissionReview events. - -Note that the mutate existing rules are force reconciled every hour by default regardless of `mutateExistingOnPolicyUpdate` settings. The reconciliation interval can be customized through use of environment variable `BACKGROUND_SCAN_INTERVAL` of the background controller. +When `mutateExistingOnPolicyUpdate` is specified as `true`, `mutate.targets` must be specified. ```yaml apiVersion: kyverno.io/v1 @@ -476,14 +475,21 @@ spec: - name: mutate-secret-on-configmap-event match: any: - - resources: - kinds: - - ConfigMap - names: - - dictionary-1 - namespaces: - - staging -... + - resources: + kinds: + - ConfigMap + names: + - dictionary-1 + namespaces: + - staging + mutate: + # ... + targets: + - apiVersion: v1 + kind: Secret + name: secret-1 + namespace: "{{ request.object.metadata.namespace }}" + # ... ``` {{% alert title="Note" color="warning" %}} @@ -543,6 +549,67 @@ spec: The targets matched by a mutate existing rule are not subject to Kyverno's [resource filters](../installation/customization.md#resource-filters). 
Always develop and test rules in a sandboxed cluster to ensure the scope is correctly confined.
 {{% /alert %}}
 
+Mutate existing rules are force reconciled every hour by default regardless of the `mutateExistingOnPolicyUpdate` value. The reconciliation interval can be customized through use of the environment variable `BACKGROUND_SCAN_INTERVAL` set on the background controller.
+
+Starting from Kyverno `v1.11.2`, mutate existing rules that trigger on the deletion of a resource will be skipped unless the `DELETE` operation is explicitly listed in the `match` block.
+
+For example, the following policy adds a label to a ConfigMap when a Deployment is created or updated:
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: Policy
+metadata:
+  name: mutate-configmap-on-undefined-deployment-operation
+spec:
+  background: false
+  rules:
+    - name: mutate-configmap-on-undefined-deployment-operation
+      match:
+        all:
+          - resources:
+              kinds:
+                - Deployment
+      mutate:
+        targets:
+          - apiVersion: v1
+            kind: ConfigMap
+            name: example
+            namespace: example
+        patchesJson6902: |-
+          - path: "/metadata/labels/modified-by-kyverno"
+            op: add
+            value: "true"
+```
+
+To have it also run the mutation when the Deployment is deleted, modify the policy as follows:
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: Policy
+metadata:
+  name: mutate-configmap-on-undefined-deployment-operation
+spec:
+  background: false
+  rules:
+    - name: mutate-configmap-on-undefined-deployment-operation
+      match:
+        all:
+          - resources:
+              kinds:
+                - Deployment
+              operations:
+                # add other operations if needed
+                - DELETE
+      mutate:
+        targets:
+          - apiVersion: v1
+            kind: ConfigMap
+            name: example
+            namespace: example
+        patchesJson6902: |-
+          - path: "/metadata/labels/modified-by-kyverno"
+            op: add
+            value: "true"
+```
+
 ### Variables Referencing Target Resources
 
 To reference data in target resources, you can define the variable `target` followed by the path to the desired attribute. For example, using `target.metadata.labels.env` references the label `env` in the target resource.
@@ -659,6 +726,10 @@ Status:
     State:    Failed
 ```
 
+### Mutate Existing Tips
+
+Combine multiple mutate existing rules into a single rule when they mutate the same resource. When these rules are combined, only a single mutation happens, which is quicker and more efficient. Otherwise, by using separate rules, multiple UpdateRequests may be created as Kyverno attempts to reconcile changes from both rules simultaneously. This can take longer, require more processing resources, and in some extreme instances can fail.
+
 ## Mutate Rule Ordering (Cascading)
 
 In some cases, it might be desired to have multiple levels of mutation rules apply to incoming resources. The `match` statement in rule A would apply a mutation to the resource, and the result of that mutation would trigger a `match` statement in rule B that would apply a second mutation. In such cases, Kyverno can accommodate more complex mutation rules, however rule ordering matters to guarantee consistent results.
@@ -792,6 +863,104 @@ Labels:       backup-needed=no
 ```
 
+## Combining Mutate and Generate
+
+In some use cases, it may be necessary to perform a mutation as a follow-on action from a Kyverno generate rule. In these cases, you can combine generation with mutation to achieve the desired effect. Some use cases for this include mutating a cloned resource to add fields not present in the source and performing a mutation of an existing resource in response to a generate rule.
+ +When combining "standard" mutation and generation behaviors, order the rules in a single policy such that the generate occurs first followed by the mutate. + +In the below policy, a Secret named `regcred` is generated into a newly-created Namespace followed by the addition of the `foo: bar` label on the generated Secret. + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: sync-secrets +spec: + rules: + - name: sync-image-pull-secret + match: + any: + - resources: + kinds: + - Namespace + generate: + apiVersion: v1 + kind: Secret + name: regcred + namespace: "{{request.object.metadata.name}}" + synchronize: true + clone: + namespace: default + name: regcred + - name: mutate-secret + match: + any: + - resources: + kinds: + - Secret + names: + - regcred + mutate: + patchStrategicMerge: + metadata: + labels: + foo: bar +``` + +When combining a generation with a mutation on an existing resource, the Kyverno background controller must be instructed to not filter out the request from the previous rule. By default, these internal requests are disregarded as a form of loop protection. Set the rule-level field `skipBackgroundRequests: false` in such cases so the background controller processes the request from the generate rule before it. You will be responsible for ensuring your policy and rule combinations do not result in a loop. This step was not necessary in the previous use case because the admission controller was responsible for processing the "standard" mutate rule during admission review. + +In the below policy, a ConfigMap is generated into a newly-created Namespace. A follow-on Kyverno mutate existing rule is used to mark the Namespace as "ready" via a label only once the resource has been successfully created. Note that the `skipBackgroundRequests: false` field is present on the mutate existing rule and not the generate rule. + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: demo-cluster-policy +spec: + mutateExistingOnPolicyUpdate: false + rules: + - name: demo-generate + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + generate: + apiVersion: v1 + kind: ConfigMap + name: somecustomcm + namespace: "{{request.object.metadata.name}}" + synchronize: false + data: + metadata: + labels: + custom/related-namespace: "{{request.object.metadata.name}}" + data: + key: value + - name: demo-mutate-existing + skipBackgroundRequests: false + match: + all: + - resources: + kinds: + - ConfigMap + selector: + matchLabels: + custom/related-namespace: "?*" + mutate: + targets: + - apiVersion: v1 + kind: Namespace + name: '{{ request.object.metadata.labels."custom/related-namespace" }}' + patchStrategicMerge: + metadata: + labels: + custom/namespace-ready: "true" +``` + ## foreach A `foreach` declaration can contain multiple entries to process different sub-elements e.g. one to process a list of containers and another to process the list of initContainers in a Pod. diff --git a/content/en/docs/writing-policies/tips.md b/content/en/docs/writing-policies/tips.md index 330477b53..c51a5b5ad 100644 --- a/content/en/docs/writing-policies/tips.md +++ b/content/en/docs/writing-policies/tips.md @@ -67,16 +67,12 @@ Depending on the level of detail needed, you may need to increase the log level. 
* The choice between using a `pattern` statement or a `deny` statement depends largely on the data you need to consider; `pattern` works on incoming (new) objects while `deny` can additionally work on variable data such as the API operation (CREATE, UPDATE, etc.), old object data, and ConfigMap data. -* In some less common cases, Kyverno will attempt to validate the schema of a policy and fail because it cannot determine if it satisfies the OpenAPI schema definition for that resource. In these cases add `spec.schemaValidation: false` to your policy to tell Kyverno to skip validation. This is similar to passing the `--validate` flag to `kubectl`. - ## Mutate * When writing policies which perform [cascading mutations](mutate.md#mutate-rule-ordering-cascading), rule ordering matters. All rules which perform cascading mutations should be in the same policy definition and ordered top to bottom to ensure consistent results. * Need to mutate an object at a specific ordered position within an array? Use the [`patchesJson6902`](mutate.md#rfc-6902-jsonpatch) method. -* Just like in `validate` rules, in some less common cases Kyverno will attempt to validate the schema of a policy and fail because it cannot determine if it satisfies the OpenAPI schema definition for that resource. In mutate rules, Kyverno will internally try to generate a patch to see if the policy would be valid. In these cases add `spec.schemaValidation: false` to your policy to tell Kyverno to skip validation. This is similar to passing the `--validate` flag to `kubectl`. - ## Generate * `generate` rules which trigger off the same source object should be organized in the same policy definition. diff --git a/content/en/docs/writing-policies/validate.md b/content/en/docs/writing-policies/validate.md index 1574b1b08..8ec815eb9 100644 --- a/content/en/docs/writing-policies/validate.md +++ b/content/en/docs/writing-policies/validate.md @@ -81,7 +81,7 @@ Change the `development` value to `production` and try again. Kyverno permits cr ## Validation Failure Action -The `validationFailureAction` attribute controls admission control behaviors for resources that are not compliant with a policy. If the value is set to `Enforce`, resource creation or updates are blocked when the resource does not comply. When the value is set to `Audit`, a policy violation is logged in a `PolicyReport` or `ClusterPolicyReport` but the resource creation or update is allowed. For preexisting resources which violate a newly-created policy set to `Enforce` mode, Kyverno will allow subsequent updates to those resources which continue to violate the policy as a way to ensure no existing resources are impacted. However, should a subsequent update to the violating resource(s) make them compliant, any further updates which would produce a violation are blocked. +The `validationFailureAction` attribute controls admission control behaviors for resources that are not compliant with a policy. If the value is set to `Enforce`, resource creation or updates are blocked when the resource does not comply. When the value is set to `Audit`, a policy violation is logged in a `PolicyReport` or `ClusterPolicyReport` but the resource creation or update is allowed. For preexisting resources which violate a newly-created policy set to `Enforce` mode, Kyverno will allow subsequent updates to those resources which continue to violate the policy as a way to ensure no existing resources are impacted. 
However, should a subsequent update to the violating resource(s) make them compliant, any further updates which would produce a violation are blocked. This behavior can be disabled using `validate.allowExistingViolations`: when `validate.allowExistingViolations` is set to `false` in an `Enforce` mode validate rule, updates to preexisting resources which violate that rule will be blocked.
 
 ## Validation Failure Action Overrides
 
@@ -653,7 +653,7 @@ The following child declarations are permitted in a `foreach`:
 
 In addition, each `foreach` declaration can contain the following declarations:
 
 - [Context](external-data-sources.md): to add additional external data only available per loop iteration.
-- [Preconditions](preconditions.md): to control when a loop iteration is skipped
+- [Preconditions](preconditions.md): to control when a loop iteration is skipped.
 - `elementScope`: controls whether to use the current list element as the scope for validation. Defaults to "true" if not specified.
 
 Here is a complete example to enforce that all container images are from a trusted registry:
@@ -1697,7 +1697,7 @@ However, setting the deployment image as `staging.example.com/nginx` will allow
 
 A ValidatingAdmissionPolicy provides a declarative, in-process option for validating admission webhooks using the [Common Expression Language](https://github.com/google/cel-spec) (CEL) to perform resource validation checks directly in the API server.
 
-Kubernetes [ValidatingAdmissionPolicy](https://kubernetes.io/docs/reference/access-authn-authz/validating-admission-policy/) was first introduced in 1.26, and it's not fully enabled by default as of Kubernetes versions up to and including 1.28.
+Kubernetes [ValidatingAdmissionPolicy](https://kubernetes.io/docs/reference/access-authn-authz/validating-admission-policy/) was first introduced in 1.26, and it is enabled by default in 1.30.
 
 {{% alert title="Tip" color="info" %}}
 The Kyverno Command Line Interface (CLI) enables the validation and testing of ValidatingAdmissionPolicies on resources before adding them to a cluster. It can be integrated into CI/CD pipelines to help with the resource authoring process, ensuring that they adhere to the required standards before deployment.
@@ -1717,12 +1717,12 @@ To generate ValidatingAdmissionPolicies, make sure to:
 
 1. Enable `ValidatingAdmissionPolicy` [feature gate](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/).
 
-2. For 1.27, enable `admissionregistration.k8s.io/v1alpha1` API, and for 1.28 enable both `admissionregistration.k8s.io/v1alpha1` and `admissionregistration.k8s.io/v1beta1` API.
+2. Enable the `admissionregistration.k8s.io/v1beta1` API.
 
 Here is the minikube command to enable ValidatingAdmissionPolicy:
 
 ```
-   minikube start --extra-config=apiserver.runtime-config=admissionregistration.k8s.io/v1beta1,apiserver.runtime-config=admissionregistration.k8s.io/v1alpha1 --feature-gates='ValidatingAdmissionPolicy=true'
+   minikube start --extra-config=apiserver.runtime-config=admissionregistration.k8s.io/v1beta1 --feature-gates='ValidatingAdmissionPolicy=true'
 ```
 
 3. Configure Kyverno to manage ValidatingAdmissionPolicies using the `--generateValidatingAdmissionPolicy=true` flag in the admission controller.
@@ -1757,8 +1757,6 @@ To generate ValidatingAdmissionPolicies, make sure to:
 
 ValidatingAdmissionPolicies can only be generated from the `validate.cel` sub-rules in Kyverno policies. Refer to the [CEL subrule](#common-expression-language-cel) section for more information.
-In case there is a PolicyException defined for the Kyverno policy, the ValidatingAdmissionPolicy will not be generated. The PolicyException is used to exclude certain resources from being validated by Kyverno policies. Refer to the [PolicyException](exceptions.md) page for more information.
-
 Below is an example of a Kyverno policy that can be used to generate a ValidatingAdmissionPolicy and its binding:
 
 ```yaml
@@ -1795,7 +1793,7 @@ status:
 The generated ValidatingAdmissionPolicy:
 
 ```yaml
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingAdmissionPolicy
 metadata:
   labels:
@@ -1832,7 +1830,7 @@ spec:
 The generated ValidatingAdmissionPolicyBinding:
 
 ```yaml
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingAdmissionPolicyBinding
 metadata:
   labels:
@@ -1893,4 +1891,137 @@ Since Kubernetes ValidatingAdmissionPolicies are cluster-scoped resources, Clust
 When a Kyverno policy matches solely on Pods, the generated ValidatingAdmissionPolicy will match both `pods` and `pods/ephemeralcontainers`. This occurs because Kyverno inherently includes `pods/ephemeralcontainers` by default in the corresponding ValidatingWebhookConfiguration, and we require analogous behavior for the ValidatingAdmissionPolicies.
 {{% /alert %}}
 
-The generated ValidatingAdmissionPolicy with its binding is totally managed by the Kyverno admission controller which means deleting/modifying these generated resources will be reverted. Any updates to Kyverno policy triggers synchronization in the corresponding ValidatingAdmissionPolicy.
+The generated ValidatingAdmissionPolicy and its binding are fully managed by the Kyverno admission controller, which means any deletion or modification of these generated resources will be reverted. Any update to the Kyverno policy triggers synchronization in the corresponding ValidatingAdmissionPolicy.
+
+If there is a [PolicyException](exceptions.md) defined for the Kyverno policy, the corresponding ValidatingAdmissionPolicy will make use of the `matchConstraints.excludeResourceRules` field.
+
+Below is an example of a Kyverno policy and a PolicyException that matches it. Both the policy and the exception will be used to generate a ValidatingAdmissionPolicy and its corresponding binding.
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: disallow-host-path
+spec:
+  background: false
+  rules:
+    - name: host-path
+      match:
+        any:
+        - resources:
+            kinds:
+              - Deployment
+              - StatefulSet
+              - ReplicaSet
+              - DaemonSet
+            operations:
+              - CREATE
+              - UPDATE
+            namespaceSelector:
+              matchExpressions:
+                - key: type
+                  operator: In
+                  values:
+                    - connector
+      validate:
+        failureAction: Audit
+        cel:
+          expressions:
+            - expression: "!has(object.spec.template.spec.volumes) || object.spec.template.spec.volumes.all(volume, !has(volume.hostPath))"
+              message: "HostPath volumes are forbidden. The field spec.template.spec.volumes[*].hostPath must be unset."
+```
+
+```yaml
+apiVersion: kyverno.io/v2
+kind: PolicyException
+metadata:
+  name: policy-exception
+spec:
+  exceptions:
+  - policyName: disallow-host-path
+    ruleNames:
+    - host-path
+  match:
+    any:
+    - resources:
+        kinds:
+        - Deployment
+        names:
+        - important-tool
+        operations:
+        - CREATE
+        - UPDATE
+```
+
+The generated ValidatingAdmissionPolicy:
+
+```yaml
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicy
+metadata:
+  labels:
+    app.kubernetes.io/managed-by: kyverno
+  name: disallow-host-path
+  ownerReferences:
+  - apiVersion: kyverno.io/v1
+    kind: ClusterPolicy
+    name: disallow-host-path
+spec:
+  failurePolicy: Fail
+  matchConstraints:
+    resourceRules:
+    - apiGroups:
+      - apps
+      apiVersions:
+      - v1
+      operations:
+      - CREATE
+      - UPDATE
+      resources:
+      - deployments
+      - statefulsets
+      - replicasets
+      - daemonsets
+    namespaceSelector:
+      matchExpressions:
+      - key: type
+        operator: In
+        values:
+        - connector
+    excludeResourceRules:
+    - apiGroups:
+      - apps
+      apiVersions:
+      - v1
+      operations:
+      - CREATE
+      - UPDATE
+      resourceNames:
+      - important-tool
+      resources:
+      - deployments
+  validations:
+  - expression: '!has(object.spec.template.spec.volumes) || object.spec.template.spec.volumes.all(volume,
+      !has(volume.hostPath))'
+    message: HostPath volumes are forbidden. The field spec.template.spec.volumes[*].hostPath
+      must be unset.
+```
+
+The generated ValidatingAdmissionPolicyBinding:
+
+```yaml
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicyBinding
+metadata:
+  labels:
+    app.kubernetes.io/managed-by: kyverno
+  name: disallow-host-path-binding
+  ownerReferences:
+  - apiVersion: kyverno.io/v1
+    kind: ClusterPolicy
+    name: disallow-host-path
+spec:
+  policyName: disallow-host-path
+  validationActions: [Audit, Warn]
+```
diff --git a/content/en/docs/writing-policies/variables.md b/content/en/docs/writing-policies/variables.md
index 657fc00ea..7c9199ee0 100644
--- a/content/en/docs/writing-policies/variables.md
+++ b/content/en/docs/writing-policies/variables.md
@@ -7,6 +7,8 @@ weight: 90
 
 Variables make policies smarter and reusable by enabling references to data in the policy definition, the [admission review request](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#webhook-request-and-response), and external data sources like ConfigMaps, the Kubernetes API Server, OCI image registries, and even external service calls.
 
+In Kyverno, you can use the double braces syntax, e.g. `{{ ... }}`, to reference a variable. For variables in policy declarations, the `$( ... )` syntax is used instead.
+
 Variables are stored as JSON and Kyverno supports using [JMESPath](http://jmespath.org/) (pronounced "James path") to select and transform JSON data. With JMESPath, values from data sources are referenced in the format of `{{key1.key2.key3}}`. For example, to reference the name of a new/incoming resource during a `kubectl apply` action such as a Namespace, you would write this as a variable reference: `{{request.object.metadata.name}}`. The policy engine will substitute any values with the format `{{ }}` with the variable value before processing the rule. For a page dedicated to exploring JMESPath's use in Kyverno, see [here](jmespath.md). Variables may be used in most places in a Kyverno rule or policy with one exception being in `match` or `exclude` statements.
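+To make this concrete, here is a minimal sketch of a rule whose message references the incoming resource by name; the policy name, label, and message text below are illustrative, not taken from an existing sample:
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: require-team-label
+spec:
+  validationFailureAction: Audit
+  rules:
+  - name: check-team
+    match:
+      any:
+      - resources:
+          kinds:
+          - Namespace
+    validate:
+      # {{request.object.metadata.name}} resolves to the name of the incoming Namespace
+      message: "Namespace {{request.object.metadata.name}} must carry a team label."
+      pattern:
+        metadata:
+          labels:
+            team: "?*"
+```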
## Pre-defined Variables

@@ -172,7 +174,7 @@ The result of the mutation of this Pod with respect to the `OTEL_RESOURCE_ATTRIB
 rule_applied=imbue-pod-spec
 ```
 
-### Variables in Helm
+## Variables in Helm
 
 Both Kyverno and Helm use Golang-style variable substitution syntax and, as a result, Kyverno policies containing variables deployed through Helm may need to be "wrapped" to avoid Helm interpreting them as Helm variables.
 
@@ -196,6 +198,7 @@ value: {{ `"{{ element.securityContext.capabilities.drop[].to_upper(@) || `}}`[]
 
 in order to render properly.
 
+
 ## Variables from admission review requests
 
 Kyverno operates as a webhook inside Kubernetes. Whenever a new request is made to the Kubernetes API server, for example to create a Pod, the API server sends this information to the webhooks registered to listen to the creation of Pod resources. This incoming data to a webhook is passed as a [`AdmissionReview`](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#webhook-request-and-response) object. There are four commonly used data properties available in any AdmissionReview request:
@@ -482,6 +485,78 @@ spec:
 
 In this example, AdmissionReview data is first collected in the inner expression in the form of `{{request.object.metadata.labels.app}}` while the outer expression is built from a ConfigMap context named `LabelsCM`.
 
+
+## Shallow substitution
+
+By default, Kyverno performs nested substitution of variables. However, in some cases, nested substitution may not be desirable.
+
+The syntax `{{- ... }}` can be used for shallow (one-time only) substitution of variables.
+
+Here is a detailed example.
+
+Consider a policy that loads a ConfigMap containing [HCL](https://developer.hashicorp.com/terraform/language/syntax/configuration) syntax data and patches resource configurations:
+
+Policy:
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: vault-auth-backend
+spec:
+  validationFailureAction: Audit
+  background: true
+  mutateExistingOnPolicyUpdate: true
+  rules:
+  - name: vault-injector-config-blue-to-green-auth-backend
+    context:
+    - name: hcl
+      variable:
+        jmesPath: replace_all( '{{ request.object.data.config }}', 'from_string','to_string')
+    match:
+      any:
+      - resources:
+          kinds:
+          - ConfigMap
+          names:
+          - test-*
+          namespaces:
+          - corp-tech-ap-team-ping-ep
+    mutate:
+      patchStrategicMerge:
+        data:
+          config: '{{- hcl }}'
+      targets:
+      - apiVersion: v1
+        kind: ConfigMap
+        name: '{{ request.object.metadata.name }}'
+        namespace: '{{ request.object.metadata.namespace }}'
+```
+
+ConfigMap:
+
+```yaml
+apiVersion: v1
+data:
+  config: |-
+    from_string
+    {{ some hcl template }}
+kind: ConfigMap
+metadata:
+  annotations:
+  labels:
+    argocd.development.cpl..co.at/app: corp-tech-ap-team-ping-ep
+  name: vault-injector-config-http-echo
+  namespace: corp-tech-ap-team-ping-ep
+```
+
+In this case, since HCL also uses the `{{ ... }}` variable syntax, Kyverno needs to be instructed not to attempt to resolve variables in the HCL.
+
+To substitute only the rule data with the HCL, and not perform nested substitutions, the declaration `'{{- hcl }}'` uses the shallow substitution syntax.
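+To make the contrast explicit, the two substitution styles behave as follows for the rule above; this is an illustrative sketch of the behavior just described, not additional documented syntax:
+
+```yaml
+# Nested (default): after substituting {{ hcl }}, Kyverno scans the result again
+# and tries to resolve the {{ some hcl template }} placeholder, which fails.
+config: '{{ hcl }}'
+
+# Shallow: the variable is substituted exactly once and the HCL's own
+# {{ ... }} markers are left untouched.
+config: '{{- hcl }}'
+```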
+
+
 ## Evaluation Order
 
 Kyverno policies can contain variables in:
diff --git a/content/en/docs/writing-policies/verify-images/_index.md b/content/en/docs/writing-policies/verify-images/_index.md
index de9f15764..920b5ec8e 100644
--- a/content/en/docs/writing-policies/verify-images/_index.md
+++ b/content/en/docs/writing-policies/verify-images/_index.md
@@ -5,10 +5,6 @@ description: >
 weight: 60
 ---
 
-{{% alert title="Warning" color="warning" %}}
-Image verification is a **beta** feature and there may be changes to the API.
-{{% /alert %}}
-
 The logical structure of a verifyImages rule is shown below:
 
 Image Verification Rule
 
diff --git a/content/en/docs/writing-policies/verify-images/notary/_index.md b/content/en/docs/writing-policies/verify-images/notary/_index.md
index ce1db8b8a..1d1e816e4 100644
--- a/content/en/docs/writing-policies/verify-images/notary/_index.md
+++ b/content/en/docs/writing-policies/verify-images/notary/_index.md
@@ -40,18 +40,32 @@ ghcr.io/kyverno/test-verify-image@sha256:b31bfb4d0213f254d361e0079deaaebefa4f82b
 
 You can also use an OCI registry client to discover signatures and attestations for an image.
 
 ```sh
-oras discover ghcr.io/kyverno/test-verify-image:signed -o tree
-ghcr.io/kyverno/test-verify-image:signed
+oras discover -o tree ghcr.io/kyverno/test-verify-image:signed
+ghcr.io/kyverno/test-verify-image@sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105
 ├── application/vnd.cncf.notary.signature
-│   └── sha256:7f870420d92765b42cec0f71ee8e25bf39b692f64d95d6f6607e9e6e54300265
+│ ├── sha256:7f870420d92765b42cec0f71ee8e25bf39b692f64d95d6f6607e9e6e54300265
+│ └── sha256:f7d941ed9e93a1ff1d5dee3b091144a87dae1d73481d5be93aa65258a110c689
 ├── vulnerability-scan
-│   └── sha256:f89cb7a0748c63a674d157ca84d725ff3ac09cc2d4aee9d0ec4315e0fe92a5fd
-│   └── application/vnd.cncf.notary.signature
-│   └── sha256:ec45844601244aa08ac750f44def3fd48ddacb736d26b83dde9f5d8ac646c2f3
-└── sbom/cyclone-dx
-  └── sha256:8cad9bd6de426683424a204697dd48b55abcd6bb6b4930ad9d8ade99ae165414
+│ └── sha256:f89cb7a0748c63a674d157ca84d725ff3ac09cc2d4aee9d0ec4315e0fe92a5fd
+│   └── application/vnd.cncf.notary.signature
+│     └── sha256:ec45844601244aa08ac750f44def3fd48ddacb736d26b83dde9f5d8ac646c2f3
+├── sbom/cyclone-dx
+│ └── sha256:8cad9bd6de426683424a204697dd48b55abcd6bb6b4930ad9d8ade99ae165414
+│   └── application/vnd.cncf.notary.signature
+│     └── sha256:61f3e42f017b72f4277c78a7a42ff2ad8f872811324cd984830dfaeb4030c322
+├── application/vnd.cyclonedx+json
+│ └── sha256:aa886b475b431a37baa0e803765a9212f0accece0b82a131ebafd43ea78fa1f8
+│   └── application/vnd.cncf.notary.signature
+│     ├── sha256:00c5f96577878d79b545d424884886c37e270fac5996f17330d77a01a96801eb
+│     └── sha256:f3dc4687f5654ea8c2bc8da4e831d22a067298e8651fb59d55565dee58e94e2d
+├── cyclonedx/vex
+│ └── sha256:c058f08c9103bb676fcd0b98e41face2436e0a16f3d1c8255797b916ab5daa8a
+│   └── application/vnd.cncf.notary.signature
+│     └── sha256:79edc8936a4fb8758b9cb2b8603a1c7903f53261c425efb0cd85b09715eb6dfa
+└── trivy/scan
+  └── sha256:a75ac963617462fdfe6a3847d17e5519465dfb069f92870050cce5269e7cbd7b
   └── application/vnd.cncf.notary.signature
-  └── sha256:61f3e42f017b72f4277c78a7a42ff2ad8f872811324cd984830dfaeb4030c322
+  └── sha256:d1e2b2ba837c164c282cf389594791a190df872cf7712b4d91aa10a3520a8460
 ```
 
 ## Verifying Image Signatures
@@ -145,15 +159,29 @@ Consider the following image: `ghcr.io/kyverno/test-verify-image:signed`
 
 ```
 ghcr.io/kyverno/test-verify-image:signed
 ├── application/vnd.cncf.notary.signature
-│ └── 
sha256:7f870420d92765b42cec0f71ee8e25bf39b692f64d95d6f6607e9e6e54300265
+│ ├── sha256:7f870420d92765b42cec0f71ee8e25bf39b692f64d95d6f6607e9e6e54300265
+│ └── sha256:f7d941ed9e93a1ff1d5dee3b091144a87dae1d73481d5be93aa65258a110c689
 ├── vulnerability-scan
-│ └── sha256:f89cb7a0748c63a674d157ca84d725ff3ac09cc2d4aee9d0ec4315e0fe92a5fd
-│ └── application/vnd.cncf.notary.signature
-│ └── sha256:ec45844601244aa08ac750f44def3fd48ddacb736d26b83dde9f5d8ac646c2f3
-└── sbom/cyclone-dx
- └── sha256:8cad9bd6de426683424a204697dd48b55abcd6bb6b4930ad9d8ade99ae165414
- └── application/vnd.cncf.notary.signature
- └── sha256:61f3e42f017b72f4277c78a7a42ff2ad8f872811324cd984830dfaeb4030c322
+│ └── sha256:f89cb7a0748c63a674d157ca84d725ff3ac09cc2d4aee9d0ec4315e0fe92a5fd
+│   └── application/vnd.cncf.notary.signature
+│     └── sha256:ec45844601244aa08ac750f44def3fd48ddacb736d26b83dde9f5d8ac646c2f3
+├── sbom/cyclone-dx
+│ └── sha256:8cad9bd6de426683424a204697dd48b55abcd6bb6b4930ad9d8ade99ae165414
+│   └── application/vnd.cncf.notary.signature
+│     └── sha256:61f3e42f017b72f4277c78a7a42ff2ad8f872811324cd984830dfaeb4030c322
+├── application/vnd.cyclonedx+json
+│ └── sha256:aa886b475b431a37baa0e803765a9212f0accece0b82a131ebafd43ea78fa1f8
+│   └── application/vnd.cncf.notary.signature
+│     ├── sha256:00c5f96577878d79b545d424884886c37e270fac5996f17330d77a01a96801eb
+│     └── sha256:f3dc4687f5654ea8c2bc8da4e831d22a067298e8651fb59d55565dee58e94e2d
+├── cyclonedx/vex
+│ └── sha256:c058f08c9103bb676fcd0b98e41face2436e0a16f3d1c8255797b916ab5daa8a
+│   └── application/vnd.cncf.notary.signature
+│     └── sha256:79edc8936a4fb8758b9cb2b8603a1c7903f53261c425efb0cd85b09715eb6dfa
+└── trivy/scan
+  └── sha256:a75ac963617462fdfe6a3847d17e5519465dfb069f92870050cce5269e7cbd7b
+    └── application/vnd.cncf.notary.signature
+      └── sha256:d1e2b2ba837c164c282cf389594791a190df872cf7712b4d91aa10a3520a8460
 ```
 
 This image has:
@@ -161,6 +189,9 @@ This image has:
 1. A notary signature.
 2. A vulnerability scan report, signed using notary.
 3. A CycloneDX SBOM, signed using notary.
+4. A CycloneDX VEX report, signed using notary.
+5. A Trivy scan report, signed using notary.
+
 
 This policy checks the signature in the repo `ghcr.io/kyverno/test-verify-image` and ensures that it has been signed by verifying its signature against the provided certificates:
 
 ```yaml
@@ -227,4 +258,116 @@ After this policy is applied, Kyverno will verify the signature on the sbom/cycl
 ```sh
 kubectl run test --image=ghcr.io/kyverno/test-verify-image:signed --dry-run=server
 pod/test created (server dry run)
-```
\ No newline at end of file
+```
+
+### Validation across multiple image attestations
+
+Consider the image `ghcr.io/kyverno/test-verify-image:signed`, which has:
+
+1. A notary signature.
+2. A vulnerability scan report, signed using notary.
+3. A CycloneDX VEX report, signed using notary.
+
+This policy checks that:
+1. The image in the repo `ghcr.io/kyverno/test-verify-image` is signed.
+2. It has a vulnerability scan report of type `trivy/vulnerability` and a CycloneDX VEX report of type `vex/cyclone-dx`, both signed using the given certificate.
+3. All vulnerabilities found in the trivy scan report are allowed in the vex report.
+ +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: check-image-attestation +spec: + validationFailureAction: Enforce + webhookTimeoutSeconds: 30 + failurePolicy: Fail + rules: + - name: verify-attestation-notary + match: + any: + - resources: + kinds: + - Pod + context: + - name: keys + configMap: + name: keys + namespace: notary-verify-attestation + verifyImages: + - type: Notary + imageReferences: + - "ghcr.io/kyverno/test-verify-image*" + attestations: + - type: trivy/vulnerability + name: trivy + attestors: + - entries: + - certificates: + cert: |- + -----BEGIN CERTIFICATE----- + MIIDmDCCAoCgAwIBAgIUCntgF4FftePAhEa6nZTsu/NMT3cwDQYJKoZIhvcNAQEL + BQAwTDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMRAwDgYDVQQHDAdTZWF0dGxl + MQ8wDQYDVQQKDAZOb3RhcnkxDTALBgNVBAMMBHRlc3QwHhcNMjQwNjEwMTYzMTQ2 + WhcNMzQwNjA4MTYzMTQ2WjBMMQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAO + BgNVBAcMB1NlYXR0bGUxDzANBgNVBAoMBk5vdGFyeTENMAsGA1UEAwwEdGVzdDCC + ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJkEGqbILiWye6C1Jz+jwwDY + k/rovpXzxS+EQDvfj/YKvx37Kr4cjboJORu3wtzICWhPUtVWZ21ShfjerKgNq0iB + mrlF4cqz2KcOfuUT3XBglH/NwhEAqOrGPQrMsoQEFWgnilr0RTc+j4vDnkdkcTj2 + K/qPhQHRAeb97TdvFCqcZfAGqiOVUqzDGxd2INz/fJd4/nYRX3LJBn9pUGxqRwZV + ElP5B/aCBjJDdh6tAElT5aDnLGAB+3+W2YwG342ELyAl2ILpbSRUpKLNAfKEd7Nj + 1moIl4or5AIlTkgewZ/AK68HPFJEV3SwNbzkgAC+/mLVCD8tqu0o0ziyIUJtoQMC + AwEAAaNyMHAwHQYDVR0OBBYEFFTIzCppwv0vZnAVmETPm1CfMdcYMB8GA1UdIwQY + MBaAFFTIzCppwv0vZnAVmETPm1CfMdcYMAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQD + AgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMDMA0GCSqGSIb3DQEBCwUAA4IBAQB8/vfP + /TQ3X80JEZDsttdvd9NLm08bTJ/T+nh0DIiV10aHymQT9/u+iahfm1+7mj+uv8LS + Y63LepQCX5p9SoFzt513pbNYXMBbRrOKpth3DD49IPL2Gce86AFGydfrakd86CL1 + 9MhFeWhtRf0KndyUX8J2s7jbpoN8HrN4/wZygiEqbQWZG8YtIZ9EewmoVMYirQqH + EvW93NcgmjiELuhjndcT/kHjhf8fUAgSuxiPIy6ern02fJjw40KzgiKNvxMoI9su + G2zu6gXmxkw+x0SMe9kX+Rg4hCIjTUM7dc66XL5LcTp4S5YEZNVC40/FgTIZoK0e + r1dC2/Y1SmmrIoA1 + -----END CERTIFICATE----- + - type: vex/cyclone-dx + name: vex + attestors: + - entries: + - certificates: + cert: |- + -----BEGIN CERTIFICATE----- + MIIDmDCCAoCgAwIBAgIUCntgF4FftePAhEa6nZTsu/NMT3cwDQYJKoZIhvcNAQEL + BQAwTDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMRAwDgYDVQQHDAdTZWF0dGxl + MQ8wDQYDVQQKDAZOb3RhcnkxDTALBgNVBAMMBHRlc3QwHhcNMjQwNjEwMTYzMTQ2 + WhcNMzQwNjA4MTYzMTQ2WjBMMQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAO + BgNVBAcMB1NlYXR0bGUxDzANBgNVBAoMBk5vdGFyeTENMAsGA1UEAwwEdGVzdDCC + ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJkEGqbILiWye6C1Jz+jwwDY + k/rovpXzxS+EQDvfj/YKvx37Kr4cjboJORu3wtzICWhPUtVWZ21ShfjerKgNq0iB + mrlF4cqz2KcOfuUT3XBglH/NwhEAqOrGPQrMsoQEFWgnilr0RTc+j4vDnkdkcTj2 + K/qPhQHRAeb97TdvFCqcZfAGqiOVUqzDGxd2INz/fJd4/nYRX3LJBn9pUGxqRwZV + ElP5B/aCBjJDdh6tAElT5aDnLGAB+3+W2YwG342ELyAl2ILpbSRUpKLNAfKEd7Nj + 1moIl4or5AIlTkgewZ/AK68HPFJEV3SwNbzkgAC+/mLVCD8tqu0o0ziyIUJtoQMC + AwEAAaNyMHAwHQYDVR0OBBYEFFTIzCppwv0vZnAVmETPm1CfMdcYMB8GA1UdIwQY + MBaAFFTIzCppwv0vZnAVmETPm1CfMdcYMAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQD + AgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMDMA0GCSqGSIb3DQEBCwUAA4IBAQB8/vfP + /TQ3X80JEZDsttdvd9NLm08bTJ/T+nh0DIiV10aHymQT9/u+iahfm1+7mj+uv8LS + Y63LepQCX5p9SoFzt513pbNYXMBbRrOKpth3DD49IPL2Gce86AFGydfrakd86CL1 + 9MhFeWhtRf0KndyUX8J2s7jbpoN8HrN4/wZygiEqbQWZG8YtIZ9EewmoVMYirQqH + EvW93NcgmjiELuhjndcT/kHjhf8fUAgSuxiPIy6ern02fJjw40KzgiKNvxMoI9su + G2zu6gXmxkw+x0SMe9kX+Rg4hCIjTUM7dc66XL5LcTp4S5YEZNVC40/FgTIZoK0e + r1dC2/Y1SmmrIoA1 + -----END CERTIFICATE----- + validate: + deny: + conditions: + any: + - key: '{{ trivy.Vulnerabilities[*].VulnerabilityID }}' + operator: AnyNotIn + value: '{{ vex.vulnerabilities[*].id }}' + message: 
All vulnerabilities in trivy and vex should be the same
+```
+
+After this policy is applied, Kyverno will verify the signatures on the image and its attestations, and then evaluate the validate deny condition, which checks that all vulnerabilities in the trivy report are present in the vex report.
+
+```sh
+kubectl run test --image=ghcr.io/kyverno/test-verify-image:signed --dry-run=server
+pod/test created (server dry run)
+```
diff --git a/content/en/docs/writing-policies/verify-images/sigstore/_index.md b/content/en/docs/writing-policies/verify-images/sigstore/_index.md
index b66e2890b..01dfc66d3 100644
--- a/content/en/docs/writing-policies/verify-images/sigstore/_index.md
+++ b/content/en/docs/writing-policies/verify-images/sigstore/_index.md
@@ -90,6 +90,48 @@
 check-image: invalid signature'
 ```
 
+### Verifying Sigstore bundles
+
+Container image signatures that use the [sigstore bundle format](https://github.com/sigstore/protobuf-specs/blob/main/protos/sigstore_bundle.proto), such as [GitHub Artifact Attestation](https://docs.github.com/en/actions/security-for-github-actions/using-artifact-attestations), can be verified using the verification type `SigstoreBundle`. The following example verifies images containing SLSA Provenance created and signed using GitHub Artifact Attestation.
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  annotations:
+    pod-policies.kyverno.io/autogen-controllers: none
+  name: sigstore-attestation-verification
+spec:
+  background: false
+  validationFailureAction: Enforce
+  webhookTimeoutSeconds: 30
+  rules:
+  - match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    name: sigstore-attestation-verification
+    verifyImages:
+    - imageReferences:
+      - "*"
+      type: SigstoreBundle
+      attestations:
+      - attestors:
+        - entries:
+          - keyless:
+              issuer: https://token.actions.githubusercontent.com
+              subject: https://github.com/vishal-chdhry/artifact-attestation-example/.github/workflows/build-attested-image.yaml@refs/heads/main
+              rekor:
+                url: https://rekor.sigstore.dev
+        conditions:
+        - all:
+          - key: '{{ buildDefinition.buildType }}'
+            operator: Equals
+            value: https://actions.github.io/buildtypes/workflow/v1
+        type: https://slsa.dev/provenance/v1
+```
+
 ### Skipping Image References
 
 `skipImageReferences` can be used to precisely filter image references that should be verified by a policy. A list of references can be specified in `skipImageReferences`, and images that match those references will be excluded from the image verification process. The following example will match all images from `ghcr.io` but will skip images from `ghcr.io/trusted`.
@@ -435,7 +477,7 @@ This image can now be verified using the leaf or root certificates.
 
 ## Keyless signing and verification
 
-The following policy verifies an image signed using ephemeral keys and signing data stored in a transparency log, known as [keyless signing](https://docs.sigstore.dev/signing/overview/):
+The following policy verifies an image signed using ephemeral keys and signing data stored in a transparency log, known as [keyless signing](https://docs.sigstore.dev/cosign/signing/overview/):
 
 ```yaml
 apiVersion: kyverno.io/v1
@@ -508,7 +550,7 @@ The supported formats include:
 
 * gcpkms://projects/[PROJECT]/locations/global/keyRings/[KEYRING]/cryptoKeys/[KEY]
 * hashivault://[KEY]
 
-Refer to https://docs.sigstore.dev/cosign/kms_support for additional details.
+Refer to https://docs.sigstore.dev/cosign/key_management/overview/ for additional details.
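+For example, a key stored in a KMS can be referenced directly in an attestor entry. This is a minimal sketch using the `keys.kms` field; the image reference and key ARN below are placeholders, not values from the documentation:
+
+```yaml
+...
+verifyImages:
+- imageReferences:
+  - "ghcr.io/myorg/myapp*"
+  attestors:
+  - entries:
+    - keys:
+        # a KMS URI in one of the supported formats listed above
+        kms: awskms:///arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+...
+```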
### Enabling IRSA to access AWS KMS

@@ -646,67 +688,42 @@ spec:
 
 Kyverno does not by default have the same chain of trust as the underlying Kubernetes Nodes nor is it able to access them due to security concerns. The fact that the Nodes in your cluster can pull an image from a private registry (even if no authentication is required) does not mean Kyverno can. Kyverno ships with trust for the most common third-party certificate authorities and has no knowledge of internal PKI which may be in use by your private registry. Without the chain of trust established, Kyverno will not be able to fetch image metadata, signatures, or other OCI artifacts from a registry. Perform the following steps to present the necessary root certificates to Kyverno to establish trust.
 
-1. There are two potential ways to have Kyverno trust your private registry. The first allows replacing all the certificates Kyverno trusts by default with only those needed by your internal registry. This has the benefit of being a simpler process at the cost of Kyverno losing trust for any public registries such as Docker Hub, Amazon ECR, GitHub Container Registry, etc. The second involves providing Kyverno with the same trust as your Nodes. Often times this trust includes the aforementioned public certificate authorities but in other cases may not. This first step involves the latter process.
+There are two possible methods to present Kyverno with custom certificates. The first is to replace the internal default certificate store with a custom one in which all necessary certificates are stored in a ConfigMap. The second is to allow Kyverno to mount a hostPath volume on the underlying Node so it can read the same certificate store as the host.
+
+#### Replace
 
-Update your internal `ca-certificates` bundle by adding your private certificate authorities root certificate to the bundle and regenerating it. Many Linux distributions have slightly different processes, but it is documented [here](https://ubuntu.com/server/docs/security-trust-store) for Ubuntu as an example. If this process has already been done and your Nodes are using this, simply copy the contents out and proceed to the next step.
+Using this first method, you replace Kyverno's internal certificate store with your own. The certificate(s) you supply using this method will overwrite the entire certificate store Kyverno ships with by default, which may prevent you from accessing public registries signed by third-party certificate authorities. This method may be favorable when you are only using private, internal registries with no need to contact external services.
 
-2. Create a ConfigMap in your cluster, preferably in the Kyverno Namespace, which has these contents stored as a multi-line value. It should look something like the below.
+To use this method, set the Helm value `global.caCertificates.data` along with the certificate(s) you wish Kyverno to trust. An example snippet is shown below.
```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kyverno-certs
-  namespace: kyverno
-data:
-  ca-certificates: |
-    -----BEGIN CERTIFICATE-----
-    MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
-    AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
-    CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
-    BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
-
-    -----BEGIN CERTIFICATE-----
-    MIIBbzCCARWgAwIBAgIQK0Z1j0Q96/LIo4tNHxsPUDAKBggqhkjOPQQDAjAWMRQw
-    EgYDVQQDEwtab2xsZXJMYWJDQTAeFw0yMjA1MTgwODI2NTBaFw0zMjA1MTUwODI2
-    NTBaMBYxFDASBgNVBAMTC1pvbGxlckxhYkNBMFkwEwYHKoZIzj0CAQYIKoZIzj0D
-    AQcDQgAEJxGhyW26O77E7fqFcbzljYzlLq/G7yANNwerWnWUKlW9gcrcPqZwwrTX
-    yaJZpdCWTObvbOyaOxq5NsytC/ubLKNFMEMwDgYDVR0PAQH/BAQDAgEGMBIGA1Ud
-    EwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFDoT1GEM8NYfxSKBkSzg4rpY+xdUMAoG
-    CCqGSM49BAMCA0gAMEUCIQDDLWFn/XJPqpNGXcyjlSJFxlQUJ5Cu/+nDvtbTeUGA
-    NAIgMsVwBafMtmLQFlfvZsE95UYoYUV4ayH+OLTTQaDQOPY=
-    -----END CERTIFICATE-----
-```
-
-3. Modify the Kyverno Deployment so it mounts this ConfigMap and overwrites the internal bundle provided by default. Refer to step one for the full ramifications of this act, especially if you have opted only to provide Kyverno with the certificate(s) of your internal root CA. An example snippet of the modified Deployment is shown below.
+global:
+  caCertificates:
+    data: |
+      -----BEGIN CERTIFICATE-----
+      MIIBdjCCAR2gAwIBAgIBADAKBggqhkjOPQQDAjAjMSEwHwYDVQQDDBhrM3Mtc2Vy
+      dmVyLWNhQDE2ODEzODUyNDgwHhcNMjMwNDEzMTEyNzI4WhcNMzMwNDEwMTEyNzI4
+
+      mPCB0cIwCgYIKoZIzj0EAwIDRwAwRAIgYF0Dy5QuQpYFyHcQEVq5GJgrE9W4gAy2
+      W/LgVuvZmucCIBcETS4DIw2pWAfeKRDaEOi2YsJoDpWd7lFLQBUbe4G7
+      -----END CERTIFICATE-----
+```
+
+When this method is used, a ConfigMap containing your certificates is written for each controller and mounted to each controller's Deployment such that it replaces the internal certificate store. These contents now serve as Kyverno's trust store.
+
+#### Host Mount
+
+The second method involves providing Kyverno with the same trust as your Nodes by mounting the certificate store on the Nodes on which the Kyverno Pods run. Oftentimes this trust includes public certificate authorities, but in other cases it may not. This method may be favorable when you are using a combination of internal, private registries and third-party, public registries. This assumes your Nodes already include trust for both.
+
+To use this method, set the Helm value `global.caCertificates.volume` along with a hostPath volume pointing to the certificate bundle on the Nodes where Kyverno Pods will run. An example snippet is shown below.
 
 ```yaml
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: kyverno
-  namespace: kyverno
-spec:
-  template:
-    spec:
-      containers:
-      - image: ghcr.io/kyverno/kyverno:v1.9.0
-        name: kyverno
-        volumeMounts:
-        - mountPath: /.sigstore
-          name: sigstore
-        - name: ca-certificates
-          mountPath: /etc/ssl/certs/ca-certificates.crt
-          subPath: ca-certificates.crt
-
-      volumes:
-      - name: sigstore
-      - name: ca-certificates
-        configMap:
-          name: kyverno-certs
-          items:
-          - key: ca-certificates
-            path: ca-certificates.crt
+global:
+  caCertificates:
+    volume:
+      hostPath:
+        path: /etc/ssl/certs/ca-certificates.crt
+        type: File
 ```
 
 ## Using a signature repository
@@ -730,6 +747,28 @@ verifyImages:
 ...
 ```
 
+## Using a different signature algorithm
+
+By default, cosign uses the `sha256` hash function when computing digests. 
To use a different signature algorithm, specify the signature algorithm for each attestor as follows:
+
+```yaml
+...
+verifyImages:
+- imageReferences:
+  - ghcr.io/kyverno/test-verify-image*
+  attestors:
+  - entries:
+    - signatureAlgorithm: sha256
+      keys:
+        publicKeys: |-
+          -----BEGIN PUBLIC KEY-----
+          MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM
+          5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
+          -----END PUBLIC KEY-----
+...
+```
+Allowed values for the signature algorithm are `sha224`, `sha256`, `sha384`, and `sha512`.
+
 ## Ignoring Tlogs and SCT Verification
 
 Cosign uses Rekor, a transparency log service, to store signatures. Cosign 2.0 verifies Rekor entries for both key-based and identity-based signing. To disable this, set `ignoreTlog: true` in Kyverno policies:
 
@@ -768,14 +807,14 @@ verifyImages:
       rekor:
         ignoreTlog: true
         url: https://rekor.sigstore.dev
-      pubKey: |-
+      pubkey: |-
         -----BEGIN PUBLIC KEY-----
         MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM
         5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
         -----END PUBLIC KEY-----
       ctlog:
         ignoreSCT: true
-      pubKey: |-
+      pubkey: |-
         -----BEGIN PUBLIC KEY-----
         MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM
         5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
         -----END PUBLIC KEY-----
@@ -784,7 +823,7 @@
 
 ## Using custom Rekor public key and CTLogs public key
 
-You can also provide the Rekor public key and ctlog public key instead of Rekor url to verify tlog entry and SCT entry. Use `rekor.pubKey` and `ctlog.pubKey` respectively for this.
+You can also provide the Rekor public key and ctlog public key instead of the Rekor URL to verify the tlog and SCT entries. Use `rekor.pubkey` and `ctlog.pubkey` respectively for this.
 
 ```yaml
 verifyImages:
@@ -799,13 +838,13 @@ verifyImages:
         5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
         -----END PUBLIC KEY-----
       rekor:
-        pubKey: |-
+        pubkey: |-
         -----BEGIN PUBLIC KEY-----
         MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEyQfmL5YwHbn9xrrgG3vgbU0KJxMY
         BibYLJ5L4VSMvGxeMLnBGdM48w5IE//6idUPj3rscigFdHs7GDMH4LLAng==
         -----END PUBLIC KEY-----
       ctlog:
-        pubKey: |-
+        pubkey: |-
         -----BEGIN PUBLIC KEY-----
         MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEE8uGVnyDWPPlB7M5KOHRzxzPHtAy
         FdGxexVrR4YqO1pRViKxmD9oMu4I7K/4sM51nbH65ycB2uRiDfIdRoV/+A==
diff --git a/content/en/policies/_index.md b/content/en/policies/_index.md
index a53be6aa2..a767d1b30 100644
--- a/content/en/policies/_index.md
+++ b/content/en/policies/_index.md
@@ -2,6 +2,14 @@
 title = "Policies"
 +++
 
+# Sample Policies
+
+The policies here are maintained by the community and are provided as samples that demonstrate the power and flexibility of Kyverno. To use them in your environment, make sure you test with the right versions of Kyverno and Kubernetes, and optimize them for your use case.
+
+Select the **Policy Type** and **Policy Category** in the left navigation, or use the search, to find the policy you need.
+
+You can contribute policies or request additional samples in the [kyverno/policies](https://github.com/kyverno/policies) Git repository.
+
 {{% pageinfo color="warning" %}}
 Most validate policies here are set to `Audit` mode by default. To block resources immediately, set to `Enforce`. 
diff --git a/content/en/policies/argo-cel/application-field-validation/application-field-validation.md b/content/en/policies/argo-cel/application-field-validation/application-field-validation.md new file mode 100644 index 000000000..123356538 --- /dev/null +++ b/content/en/policies/argo-cel/application-field-validation/application-field-validation.md @@ -0,0 +1,77 @@ +--- +title: "Application Field Validation in CEL expressions" +category: Argo in CEL +version: 1.11.0 +subject: Application +policyType: "validate" +description: > + This policy performs some best practices validation on Application fields. Path or chart must be specified but never both. And destination.name or destination.server must be specified but never both. +--- + +## Policy Definition +/argo-cel/application-field-validation/application-field-validation.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-field-validation + annotations: + policies.kyverno.io/title: Application Field Validation in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Application + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + This policy performs some best practices validation on Application fields. + Path or chart must be specified but never both. And destination.name or + destination.server must be specified but never both. +spec: + validationFailureAction: Audit + background: true + rules: + - name: source-path-chart + match: + any: + - resources: + kinds: + - Application + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.spec.source) && + ( + (has(object.spec.source.path) && !has(object.spec.source.chart)) || + (!has(object.spec.source.path) && has(object.spec.source.chart)) + ) + message: >- + `spec.source.path` OR `spec.source.chart` should be specified but never both. + - name: destination-server-name + match: + any: + - resources: + kinds: + - Application + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.spec.destination) && + ( + (has(object.spec.destination.server) && !has(object.spec.destination.name)) || + (!has(object.spec.destination.server) && has(object.spec.destination.name)) + ) + message: >- + `spec.destination.server` OR `spec.destination.name` should be specified but never both. + + +``` diff --git a/content/en/policies/argo-cel/application-prevent-default-project/application-prevent-default-project.md b/content/en/policies/argo-cel/application-prevent-default-project/application-prevent-default-project.md new file mode 100644 index 000000000..bf4336668 --- /dev/null +++ b/content/en/policies/argo-cel/application-prevent-default-project/application-prevent-default-project.md @@ -0,0 +1,49 @@ +--- +title: "Prevent Use of Default Project in CEL expressions" +category: Argo in CEL +version: 1.11.0 +subject: Application +policyType: "validate" +description: > + This policy prevents the use of the default project in an Application. 
+--- + +## Policy Definition +/argo-cel/application-prevent-default-project/application-prevent-default-project.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-prevent-default-project + annotations: + policies.kyverno.io/title: Prevent Use of Default Project in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Application + policies.kyverno.io/description: >- + This policy prevents the use of the default project in an Application. +spec: + validationFailureAction: Audit + background: true + rules: + - name: default-project + match: + any: + - resources: + kinds: + - Application + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.project) && object.spec.project != 'default'" + message: "The default project may not be used in an Application." + + +``` diff --git a/content/en/policies/argo-cel/application-prevent-updates-project/application-prevent-updates-project.md b/content/en/policies/argo-cel/application-prevent-updates-project/application-prevent-updates-project.md new file mode 100644 index 000000000..7f8ab295e --- /dev/null +++ b/content/en/policies/argo-cel/application-prevent-updates-project/application-prevent-updates-project.md @@ -0,0 +1,48 @@ +--- +title: "Prevent Updates to Project in CEL expressions" +category: Argo in CEL +version: +subject: Application +policyType: "validate" +description: > + This policy prevents updates to the project field after an Application is created. +--- + +## Policy Definition +/argo-cel/application-prevent-updates-project/application-prevent-updates-project.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: application-prevent-updates-project + annotations: + policies.kyverno.io/title: Prevent Updates to Project in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.12.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Application + policies.kyverno.io/description: >- + This policy prevents updates to the project field after an Application is created. +spec: + validationFailureAction: Audit + background: true + rules: + - name: project-updates + match: + any: + - resources: + kinds: + - Application + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: "object.spec.project == oldObject.spec.project" + message: "The spec.project cannot be changed once the Application is created." + + +``` diff --git a/content/en/policies/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.md b/content/en/policies/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.md new file mode 100644 index 000000000..3a054dde4 --- /dev/null +++ b/content/en/policies/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.md @@ -0,0 +1,50 @@ +--- +title: "Ensure ApplicationSet Name Matches Project in CEL expressions" +category: Argo in CEL +version: 1.11.0 +subject: ApplicationSet +policyType: "validate" +description: > + This policy ensures that the name of the ApplicationSet is the same value provided in the project. 
+--- + +## Policy Definition +/argo-cel/applicationset-name-matches-project/applicationset-name-matches-project.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: applicationset-name-matches-project + annotations: + policies.kyverno.io/title: Ensure ApplicationSet Name Matches Project in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: ApplicationSet + policies.kyverno.io/description: >- + This policy ensures that the name of the ApplicationSet is the + same value provided in the project. +spec: + validationFailureAction: Audit + background: true + rules: + - name: match-name + match: + any: + - resources: + kinds: + - ApplicationSet + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.template.spec.project == object.metadata.name" + message: "The name must match the project." + + +``` diff --git a/content/en/policies/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.md b/content/en/policies/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.md new file mode 100644 index 000000000..f268027fe --- /dev/null +++ b/content/en/policies/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.md @@ -0,0 +1,55 @@ +--- +title: "Enforce AppProject with clusterResourceBlacklist in CEL expressions" +category: Argo in CEL +version: 1.11.0 +subject: AppProject +policyType: "validate" +description: > + An AppProject may optionally specify clusterResourceBlacklist which is a blacklisted group of cluster resources. This is often a good practice to ensure AppProjects do not allow more access than needed. This policy is a combination of two rules which enforce that all AppProjects specify clusterResourceBlacklist and that their group and kind have wildcards as values. +--- + +## Policy Definition +/argo-cel/appproject-clusterresourceblacklist/appproject-clusterresourceblacklist.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: appproject-clusterresourceblacklist + annotations: + policies.kyverno.io/title: Enforce AppProject with clusterResourceBlacklist in CEL expressions + policies.kyverno.io/category: Argo in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: AppProject + policies.kyverno.io/description: >- + An AppProject may optionally specify clusterResourceBlacklist which is a blacklisted + group of cluster resources. This is often a good practice to ensure AppProjects do + not allow more access than needed. This policy is a combination of two rules which + enforce that all AppProjects specify clusterResourceBlacklist and that their group + and kind have wildcards as values. +spec: + validationFailureAction: Audit + background: true + rules: + - name: has-wildcard-and-validate-clusterresourceblacklist + match: + any: + - resources: + kinds: + - AppProject + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.clusterResourceBlacklist)" + message: "AppProject must specify clusterResourceBlacklist." 
+ - expression: "object.spec.clusterResourceBlacklist.all(element, element.group.contains('*') && element.kind.contains('*'))" + message: "Wildcards must be present in group and kind for clusterResourceBlacklist." + + +``` diff --git a/content/en/policies/argo/argo-cluster-generation-from-rancher-capi/argo-cluster-generation-from-rancher-capi.md b/content/en/policies/argo/argo-cluster-generation-from-rancher-capi/argo-cluster-generation-from-rancher-capi.md index 25993b84d..3998d7e43 100644 --- a/content/en/policies/argo/argo-cluster-generation-from-rancher-capi/argo-cluster-generation-from-rancher-capi.md +++ b/content/en/policies/argo/argo-cluster-generation-from-rancher-capi/argo-cluster-generation-from-rancher-capi.md @@ -25,7 +25,7 @@ metadata: policies.kyverno.io/minversion: 1.7.0 kyverno.io/kubernetes-version: "1.23" policies.kyverno.io/description: >- - This policy generates and synchronizes Argo CD cluster secrets from Rancher + This policy generates and synchronizes Argo CD cluster secrets from Rancher managed cluster.provisioning.cattle.io/v1 resources and their corresponding CAPI secrets. In this solution, Argo CD integrates with Rancher managed clusters via the central Rancher authentication proxy which shares the network endpoint of the Rancher API/GUI. @@ -33,7 +33,7 @@ metadata: "Cluster-API cluster auto-registration" and Rancher issue https://github.com/rancher/rancher/issues/38053 "Fix type and labels Rancher v2 provisioner specifies when creating CAPI Cluster Secret". spec: - generateExistingOnPolicyUpdate: true + generateExisting: true rules: - name: source-rancher-non-local-cluster-and-capi-secret match: diff --git a/content/en/policies/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.md b/content/en/policies/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.md new file mode 100644 index 000000000..7bfa0549d --- /dev/null +++ b/content/en/policies/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.md @@ -0,0 +1,56 @@ +--- +title: "Require Encryption with AWS LoadBalancers in CEL expressions" +category: AWS, EKS Best Practices in CEL +version: +subject: Service +policyType: "validate" +description: > + Services of type LoadBalancer when deployed inside AWS have support for transport encryption if it is enabled via an annotation. This policy requires that Services of type LoadBalancer contain the annotation service.beta.kubernetes.io/aws-load-balancer-ssl-cert with some value. +--- + +## Policy Definition +/aws-cel/require-encryption-aws-loadbalancers/require-encryption-aws-loadbalancers.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-encryption-aws-loadbalancers + annotations: + policies.kyverno.io/title: Require Encryption with AWS LoadBalancers in CEL expressions + policies.kyverno.io/category: AWS, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Service + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Services of type LoadBalancer when deployed inside AWS have support for + transport encryption if it is enabled via an annotation. This policy requires + that Services of type LoadBalancer contain the annotation + service.beta.kubernetes.io/aws-load-balancer-ssl-cert with some value. 
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+    - name: aws-loadbalancer-has-ssl-cert
+      match:
+        any:
+        - resources:
+            kinds:
+              - Service
+            operations:
+            - CREATE
+            - UPDATE
+      celPreconditions:
+        - name: "type-should-be-load-balancer"
+          expression: "object.spec.type == 'LoadBalancer'"
+      validate:
+        cel:
+          expressions:
+            - expression: >-
+                has(object.metadata.annotations) &&
+                'service.beta.kubernetes.io/aws-load-balancer-ssl-cert' in object.metadata.annotations && object.metadata.annotations['service.beta.kubernetes.io/aws-load-balancer-ssl-cert'] != ''
+              message: "Service of type LoadBalancer must carry the annotation service.beta.kubernetes.io/aws-load-balancer-ssl-cert."
+
+
+```
diff --git a/content/en/policies/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.md b/content/en/policies/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.md
new file mode 100644
index 000000000..759e17a4f
--- /dev/null
+++ b/content/en/policies/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.md
@@ -0,0 +1,77 @@
+---
+title: "Disallow CRI socket mounts in CEL expressions"
+category: Best Practices, EKS Best Practices in CEL
+version: 1.11.0
+subject: Pod
+policyType: "validate"
+description: >
+    Container daemon socket bind mounts allow access to the container engine on the node. This access can be used for privilege escalation and to manage containers outside of Kubernetes, and hence should not be allowed. This policy validates that the sockets used for the CRI engines Docker, Containerd, and CRI-O are not used. In addition to or in replacement of this policy, preventing users from mounting the parent directories (/var/run and /var) may be necessary to completely prevent socket bind mounts.
+---
+
+## Policy Definition
+/best-practices-cel/disallow-cri-sock-mount/disallow-cri-sock-mount.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: disallow-container-sock-mounts
+  annotations:
+    policies.kyverno.io/title: Disallow CRI socket mounts in CEL expressions
+    policies.kyverno.io/category: Best Practices, EKS Best Practices in CEL
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/description: >-
+      Container daemon socket bind mounts allow access to the container engine on the
+      node. This access can be used for privilege escalation and to manage containers
+      outside of Kubernetes, and hence should not be allowed. This policy validates that
+      the sockets used for the CRI engines Docker, Containerd, and CRI-O are not used. In addition
+      to or in replacement of this policy, preventing users from mounting the parent directories
+      (/var/run and /var) may be necessary to completely prevent socket bind mounts.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+    - name: validate-socket-mounts
+      match:
+        any:
+        - resources:
+            kinds:
+              - Pod
+            operations:
+            - CREATE
+            - UPDATE
+      validate:
+        cel:
+          variables:
+            - name: hasVolumes
+              expression: "!has(object.spec.volumes)"
+            - name: volumes
+              expression: "object.spec.volumes"
+            - name: volumesWithHostPath
+              expression: "variables.volumes.filter(volume, has(volume.hostPath))"
+          expressions:
+            - expression: >-
+                variables.hasVolumes ||
+                variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/docker.sock'))
+              message: "Use of the Docker Unix socket is not allowed." 
+
+            - expression: >-
+                variables.hasVolumes ||
+                variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/containerd/containerd.sock'))
+              message: "Use of the Containerd Unix socket is not allowed."
+
+            - expression: >-
+                variables.hasVolumes ||
+                variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/crio/crio.sock'))
+              message: "Use of the CRI-O Unix socket is not allowed."
+
+            - expression: >-
+                variables.hasVolumes ||
+                variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/cri-dockerd.sock'))
+              message: "Use of the Docker CRI socket is not allowed."
+
+
+```
diff --git a/content/en/policies/best-practices-cel/disallow-default-namespace/disallow-default-namespace.md b/content/en/policies/best-practices-cel/disallow-default-namespace/disallow-default-namespace.md
new file mode 100644
index 000000000..32f697eab
--- /dev/null
+++ b/content/en/policies/best-practices-cel/disallow-default-namespace/disallow-default-namespace.md
@@ -0,0 +1,72 @@
+---
+title: "Disallow Default Namespace in CEL expressions"
+category: Multi-Tenancy in CEL
+version: 1.11.0
+subject: Pod
+policyType: "validate"
+description: >
+    Kubernetes Namespaces are an optional feature that provides a way to segment and isolate cluster resources across multiple applications and users. As a best practice, workloads should be isolated with Namespaces. Namespaces should be required and the default (empty) Namespace should not be used. This policy validates that Pods specify a Namespace name other than `default`. Rule auto-generation is disabled here because Pod controllers need to specify the `namespace` field under the top-level `metadata` object and not at the Pod template level.
+---
+
+## Policy Definition
+/best-practices-cel/disallow-default-namespace/disallow-default-namespace.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: disallow-default-namespace
+  annotations:
+    pod-policies.kyverno.io/autogen-controllers: none
+    policies.kyverno.io/title: Disallow Default Namespace in CEL expressions
+    policies.kyverno.io/minversion: 1.11.0
+    policies.kyverno.io/category: Multi-Tenancy in CEL
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Kubernetes Namespaces are an optional feature that provides a way to segment and
+      isolate cluster resources across multiple applications and users. As a best
+      practice, workloads should be isolated with Namespaces. Namespaces should be required
+      and the default (empty) Namespace should not be used. This policy validates that Pods
+      specify a Namespace name other than `default`. Rule auto-generation is disabled here
+      because Pod controllers need to specify the `namespace` field under the top-level `metadata`
+      object and not at the Pod template level.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+  - name: validate-namespace
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    validate:
+      cel:
+        expressions:
+        - expression: "namespaceObject.metadata.name != 'default'"
+          message: "Using 'default' namespace is not allowed." 
+  - name: validate-podcontroller-namespace
+    match:
+      any:
+      - resources:
+          kinds:
+          - DaemonSet
+          - Deployment
+          - Job
+          - StatefulSet
+          operations:
+          - CREATE
+          - UPDATE
+    validate:
+      cel:
+        expressions:
+        - expression: "namespaceObject.metadata.name != 'default'"
+          message: "Using 'default' namespace is not allowed for pod controllers."
+
+
+```
diff --git a/content/en/policies/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.md b/content/en/policies/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.md
new file mode 100644
index 000000000..56bef4a86
--- /dev/null
+++ b/content/en/policies/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.md
@@ -0,0 +1,52 @@
+---
+title: "Disallow empty Ingress host in CEL expressions"
+category: Best Practices in CEL
+version: 1.11.0
+subject: Ingress
+policyType: "validate"
+description: >
+    An Ingress resource needs to define an actual host name in order to be valid. This policy ensures that there is a hostname for each rule defined.
+---
+
+## Policy Definition
+/best-practices-cel/disallow-empty-ingress-host/disallow-empty-ingress-host.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: disallow-empty-ingress-host
+  annotations:
+    policies.kyverno.io/title: Disallow empty Ingress host in CEL expressions
+    policies.kyverno.io/category: Best Practices in CEL
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Ingress
+    policies.kyverno.io/description: >-
+      An Ingress resource needs to define an actual host name
+      in order to be valid. This policy ensures that there is a
+      hostname for each rule defined.
+spec:
+  validationFailureAction: Audit
+  background: false
+  rules:
+    - name: disallow-empty-ingress-host
+      match:
+        any:
+        - resources:
+            kinds:
+              - Ingress
+            operations:
+            - CREATE
+            - UPDATE
+      validate:
+        cel:
+          expressions:
+            - expression: >-
+                !has(object.spec.rules) ||
+                object.spec.rules.all(rule, has(rule.host) && has(rule.http))
+              message: "The Ingress host name must be defined, not empty."
+
+
+```
diff --git a/content/en/policies/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.md b/content/en/policies/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.md
new file mode 100644
index 000000000..18235e254
--- /dev/null
+++ b/content/en/policies/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.md
@@ -0,0 +1,52 @@
+---
+title: "Disallow Helm Tiller in CEL expressions"
+category: Sample in CEL
+version: 1.11.0
+subject: Pod
+policyType: "validate"
+description: >
+    Tiller, found in Helm v2, has known security challenges. It requires administrative privileges and acts as a shared resource accessible to any authenticated user. Tiller can lead to privilege escalation as restricted users can impact other users. It is recommended to use Helm v3+ which does not contain Tiller for these reasons. This policy validates that there is not an image containing the name `tiller`. 
+---
+
+## Policy Definition
+/best-practices-cel/disallow-helm-tiller/disallow-helm-tiller.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: disallow-helm-tiller
+  annotations:
+    policies.kyverno.io/title: Disallow Helm Tiller in CEL expressions
+    policies.kyverno.io/category: Sample in CEL
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Tiller, found in Helm v2, has known security challenges. It requires administrative privileges and acts as a shared
+      resource accessible to any authenticated user. Tiller can lead to privilege escalation as
+      restricted users can impact other users. It is recommended to use Helm v3+ which does not contain
+      Tiller for these reasons. This policy validates that there is not an image
+      containing the name `tiller`.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+    - name: validate-helm-tiller
+      match:
+        any:
+        - resources:
+            kinds:
+              - Pod
+            operations:
+            - CREATE
+            - UPDATE
+      validate:
+        cel:
+          expressions:
+            - expression: "object.spec.containers.all(container, !container.image.contains('tiller'))"
+              message: "Helm Tiller is not allowed"
+
+
+```
diff --git a/content/en/policies/best-practices-cel/disallow-latest-tag/disallow-latest-tag.md b/content/en/policies/best-practices-cel/disallow-latest-tag/disallow-latest-tag.md
new file mode 100644
index 000000000..68b1881c2
--- /dev/null
+++ b/content/en/policies/best-practices-cel/disallow-latest-tag/disallow-latest-tag.md
@@ -0,0 +1,53 @@
+---
+title: "Disallow Latest Tag in CEL expressions"
+category: Best Practices in CEL
+version: 1.11.0
+subject: Pod
+policyType: "validate"
+description: >
+    The ':latest' tag is mutable and can lead to unexpected errors if the image changes. A best practice is to use an immutable tag that maps to a specific version of an application Pod. This policy validates that the image specifies a tag and that it is not called `latest`.
+---
+
+## Policy Definition
+/best-practices-cel/disallow-latest-tag/disallow-latest-tag.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: disallow-latest-tag
+  annotations:
+    policies.kyverno.io/title: Disallow Latest Tag in CEL expressions
+    policies.kyverno.io/category: Best Practices in CEL
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      The ':latest' tag is mutable and can lead to unexpected errors if the
+      image changes. A best practice is to use an immutable tag that maps to
+      a specific version of an application Pod. This policy validates that the image
+      specifies a tag and that it is not called `latest`.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+    - name: require-and-validate-image-tag
+      match:
+        any:
+        - resources:
+            kinds:
+              - Pod
+            operations:
+            - CREATE
+            - UPDATE
+      validate:
+        cel:
+          expressions:
+            - expression: "object.spec.containers.all(container, container.image.contains(':'))"
+              message: "An image tag is required."
+            - expression: "object.spec.containers.all(container, !container.image.endsWith(':latest'))"
+              message: "Using a mutable image tag e.g. 'latest' is not allowed." 
+ + +``` diff --git a/content/en/policies/best-practices-cel/require-drop-all/require-drop-all.md b/content/en/policies/best-practices-cel/require-drop-all/require-drop-all.md new file mode 100644 index 000000000..6f9a8eec4 --- /dev/null +++ b/content/en/policies/best-practices-cel/require-drop-all/require-drop-all.md @@ -0,0 +1,60 @@ +--- +title: "Drop All Capabilities in CEL expressions" +category: Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Capabilities permit privileged actions without giving full root access. All capabilities should be dropped from a Pod, with only those required added back. This policy ensures that all containers explicitly specify the `drop: ["ALL"]` setting. Note that this policy also illustrates how to cover drop entries written in any letter case although this may not strictly conform to the Pod Security Standards. +--- + +## Policy Definition +/best-practices-cel/require-drop-all/require-drop-all.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: drop-all-capabilities + annotations: + policies.kyverno.io/title: Drop All Capabilities in CEL expressions + policies.kyverno.io/category: Best Practices in CEL + policies.kyverno.io/severity: medium + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/minversion: 1.11.0 + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Capabilities permit privileged actions without giving full root access. All + capabilities should be dropped from a Pod, with only those required added back. + This policy ensures that all containers explicitly specify the `drop: ["ALL"]` + setting. Note that this policy also illustrates how to cover drop entries written in any + letter case although this may not strictly conform to the Pod Security Standards. +spec: + validationFailureAction: Audit + background: true + rules: + - name: require-drop-all + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + variables.allContainers.all(container, + has(container.securityContext) && + has(container.securityContext.capabilities) && + has(container.securityContext.capabilities.drop) && + container.securityContext.capabilities.drop.exists(capability, capability.upperAscii() == 'ALL')) + message: "Containers must drop `ALL` capabilities." + + +``` diff --git a/content/en/policies/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.md b/content/en/policies/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.md new file mode 100644 index 000000000..134b8d1eb --- /dev/null +++ b/content/en/policies/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.md @@ -0,0 +1,62 @@ +--- +title: "Drop CAP_NET_RAW in CEL expressions" +category: Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Capabilities permit privileged actions without giving full root access. The CAP_NET_RAW capability, enabled by default, allows processes in a container to forge packets and bind to any interface, potentially leading to MitM attacks. This policy ensures that all containers explicitly drop the CAP_NET_RAW capability.
Note that this policy also illustrates how to cover drop entries written in any letter case although this may not strictly conform to the Pod Security Standards. +--- + +## Policy Definition +/best-practices-cel/require-drop-cap-net-raw/require-drop-cap-net-raw.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: drop-cap-net-raw + annotations: + policies.kyverno.io/title: Drop CAP_NET_RAW in CEL expressions + policies.kyverno.io/category: Best Practices in CEL + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Capabilities permit privileged actions without giving full root access. The + CAP_NET_RAW capability, enabled by default, allows processes in a container to + forge packets and bind to any interface, potentially leading to MitM attacks. + This policy ensures that all containers explicitly drop the CAP_NET_RAW + capability. Note that this policy also illustrates how to cover drop entries written in any + letter case although this may not strictly conform to the Pod Security Standards. +spec: + validationFailureAction: Audit + background: true + rules: + - name: require-drop-cap-net-raw + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + variables.allContainers.all(container, + has(container.securityContext) && + has(container.securityContext.capabilities) && + has(container.securityContext.capabilities.drop) && + container.securityContext.capabilities.drop.exists(capability, capability.upperAscii() == 'CAP_NET_RAW')) + message: >- + Containers must drop the `CAP_NET_RAW` capability. + + +``` diff --git a/content/en/policies/best-practices-cel/require-labels/require-labels.md b/content/en/policies/best-practices-cel/require-labels/require-labels.md new file mode 100644 index 000000000..3bdb82cc1 --- /dev/null +++ b/content/en/policies/best-practices-cel/require-labels/require-labels.md @@ -0,0 +1,53 @@ +--- +title: "Require Labels in CEL expressions" +category: Best Practices in CEL +version: 1.11.0 +subject: Pod, Label +policyType: "validate" +description: > + Define and use labels that identify semantic attributes of your application or Deployment. A common set of labels allows tools to work collaboratively, describing objects in a common manner that all tools can understand. The recommended labels describe applications in a way that can be queried. This policy validates that the label `app.kubernetes.io/name` is specified with some value. +--- + +## Policy Definition +/best-practices-cel/require-labels/require-labels.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-labels + annotations: + policies.kyverno.io/title: Require Labels in CEL expressions + policies.kyverno.io/category: Best Practices in CEL + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod, Label + policies.kyverno.io/description: >- + Define and use labels that identify semantic attributes of your application or Deployment.
+ A common set of labels allows tools to work collaboratively, describing objects in a common manner that + all tools can understand. The recommended labels describe applications in a way that can be + queried. This policy validates that the label `app.kubernetes.io/name` is specified with some value. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-for-labels + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.metadata.labels) && + 'app.kubernetes.io/name' in object.metadata.labels && object.metadata.labels['app.kubernetes.io/name'] != "" + message: "The label `app.kubernetes.io/name` is required." + + +``` diff --git a/content/en/policies/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.md b/content/en/policies/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.md new file mode 100644 index 000000000..6bd6bafdf --- /dev/null +++ b/content/en/policies/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.md @@ -0,0 +1,60 @@ +--- +title: "Require Limits and Requests in CEL expressions" +category: Best Practices, EKS Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + As application workloads share cluster resources, it is important to limit resources requested and consumed by each Pod. It is recommended to require resource requests and limits per Pod, especially for memory and CPU. If a Namespace level request or limit is specified, defaults will automatically be applied to each Pod based on the LimitRange configuration. This policy validates that all containers have something specified for memory and CPU requests and memory limits. +--- + +## Policy Definition +/best-practices-cel/require-pod-requests-limits/require-pod-requests-limits.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-requests-limits + annotations: + policies.kyverno.io/title: Require Limits and Requests in CEL expressions + policies.kyverno.io/category: Best Practices, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + As application workloads share cluster resources, it is important to limit resources + requested and consumed by each Pod. It is recommended to require resource requests and + limits per Pod, especially for memory and CPU. If a Namespace level request or limit is specified, + defaults will automatically be applied to each Pod based on the LimitRange configuration. + This policy validates that all containers have something specified for memory and CPU + requests and memory limits. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-resources + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + has(container.resources) && + has(container.resources.requests) && + has(container.resources.requests.memory) && + has(container.resources.requests.cpu) && + has(container.resources.limits) && + has(container.resources.limits.memory)) + message: "CPU and memory resource requests and limits are required." 
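+        # Sketch of a container resources block that would satisfy this rule
+        # (hypothetical values; note the expression requires CPU and memory
+        # requests plus a memory limit, but deliberately not a CPU limit):
+        #
+        #   resources:
+        #     requests:
+        #       cpu: "100m"
+        #       memory: "64Mi"
+        #     limits:
+        #       memory: "128Mi"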
+ + +``` diff --git a/content/en/policies/best-practices-cel/require-probes/require-probes.md b/content/en/policies/best-practices-cel/require-probes/require-probes.md new file mode 100644 index 000000000..522f00df4 --- /dev/null +++ b/content/en/policies/best-practices-cel/require-probes/require-probes.md @@ -0,0 +1,59 @@ +--- +title: "Require Pod Probes in CEL expressions" +category: Best Practices, EKS Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Liveness and readiness probes need to be configured to correctly manage a Pod's lifecycle during deployments, restarts, and upgrades. For each Pod, a periodic `livenessProbe` is performed by the kubelet to determine if the Pod's containers are running or need to be restarted. A `readinessProbe` is used by Services and Deployments to determine if the Pod is ready to receive network traffic. This policy validates that all containers have one of livenessProbe, readinessProbe, or startupProbe defined. +--- + +## Policy Definition +/best-practices-cel/require-probes/require-probes.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-pod-probes + annotations: + pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet + policies.kyverno.io/title: Require Pod Probes in CEL expressions + policies.kyverno.io/category: Best Practices, EKS Best Practices in CEL + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Liveness and readiness probes need to be configured to correctly manage a Pod's + lifecycle during deployments, restarts, and upgrades. For each Pod, a periodic + `livenessProbe` is performed by the kubelet to determine if the Pod's containers + are running or need to be restarted. A `readinessProbe` is used by Services + and Deployments to determine if the Pod is ready to receive network traffic. + This policy validates that all containers have one of livenessProbe, readinessProbe, + or startupProbe defined. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-probes + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + has(container.livenessProbe) || + has(container.startupProbe) || + has(container.readinessProbe)) + message: "Liveness, readiness, or startup probes are required for all containers." + + +``` diff --git a/content/en/policies/best-practices-cel/require-ro-rootfs/require-ro-rootfs.md b/content/en/policies/best-practices-cel/require-ro-rootfs/require-ro-rootfs.md new file mode 100644 index 000000000..188eb9ce6 --- /dev/null +++ b/content/en/policies/best-practices-cel/require-ro-rootfs/require-ro-rootfs.md @@ -0,0 +1,55 @@ +--- +title: "Require Read-Only Root Filesystem in CEL expressions" +category: Best Practices, EKS Best Practices, PSP Migration in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + A read-only root file system helps to enforce an immutable infrastructure strategy; the container only needs to write on the mounted volume that persists the state. An immutable root filesystem can also prevent malicious binaries from writing to the host system. This policy validates that containers define a securityContext with `readOnlyRootFilesystem: true`. 
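+# Editorial note (sketch): workloads that need scratch space can still satisfy this
+# check by mounting a writable volume (for example an emptyDir at /tmp) while
+# keeping readOnlyRootFilesystem set to true in each container's securityContext.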
+--- + +## Policy Definition +/best-practices-cel/require-ro-rootfs/require-ro-rootfs.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-ro-rootfs + annotations: + policies.kyverno.io/title: Require Read-Only Root Filesystem in CEL expressions + policies.kyverno.io/category: Best Practices, EKS Best Practices, PSP Migration in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/minversion: 1.11.0 + policies.kyverno.io/description: >- + A read-only root file system helps to enforce an immutable infrastructure strategy; + the container only needs to write on the mounted volume that persists the state. + An immutable root filesystem can also prevent malicious binaries from writing to the + host system. This policy validates that containers define a securityContext + with `readOnlyRootFilesystem: true`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-readOnlyRootFilesystem + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + has(container.securityContext) && + container.securityContext.readOnlyRootFilesystem == true) + message: "Root filesystem must be read-only." + + +``` diff --git a/content/en/policies/best-practices-cel/restrict-image-registries/restrict-image-registries.md b/content/en/policies/best-practices-cel/restrict-image-registries/restrict-image-registries.md new file mode 100644 index 000000000..6f6471e3e --- /dev/null +++ b/content/en/policies/best-practices-cel/restrict-image-registries/restrict-image-registries.md @@ -0,0 +1,55 @@ +--- +title: "Restrict Image Registries in CEL expressions" +category: Best Practices, EKS Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Images from unknown, public registries can be of dubious quality and may not be scanned and secured, representing a high degree of risk. Requiring use of known, approved registries helps reduce threat exposure by ensuring image pulls only come from them. This policy validates that container images only originate from the registry `eu.foo.io` or `bar.io`. Use of this policy requires customization to define your allowable registries. +--- + +## Policy Definition +/best-practices-cel/restrict-image-registries/restrict-image-registries.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-image-registries + annotations: + policies.kyverno.io/title: Restrict Image Registries in CEL expressions + policies.kyverno.io/category: Best Practices, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Images from unknown, public registries can be of dubious quality and may not be + scanned and secured, representing a high degree of risk. Requiring use of known, approved + registries helps reduce threat exposure by ensuring image pulls only come from them. This + policy validates that container images only originate from the registry `eu.foo.io` or + `bar.io`. Use of this policy requires customization to define your allowable registries. 
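+# Customization sketch (hypothetical registry): to allow a single private registry
+# instead of the two sample prefixes used below, the expression could be written as
+#   variables.allContainers.all(container, container.image.startsWith('registry.example.com/'))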
+spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-registries + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: "variables.allContainers.all(container, container.image.startsWith('eu.foo.io/') || container.image.startsWith('bar.io/'))" + message: "Unknown image registry." + + +``` diff --git a/content/en/policies/best-practices-cel/restrict-node-port/restrict-node-port.md b/content/en/policies/best-practices-cel/restrict-node-port/restrict-node-port.md new file mode 100644 index 000000000..8bd330469 --- /dev/null +++ b/content/en/policies/best-practices-cel/restrict-node-port/restrict-node-port.md @@ -0,0 +1,52 @@ +--- +title: "Disallow NodePort in CEL expressions" +category: Best Practices in CEL +version: 1.11.0 +subject: Service +policyType: "validate" +description: > + A Kubernetes Service of type NodePort uses a host port to receive traffic from any source. A NetworkPolicy cannot be used to control traffic to host ports. Although NodePort Services can be useful, their use must be limited to Services with additional upstream security checks. This policy validates that any new Services do not use the `NodePort` type. +--- + +## Policy Definition +/best-practices-cel/restrict-node-port/restrict-node-port.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-nodeport + annotations: + policies.kyverno.io/title: Disallow NodePort in CEL expressions + policies.kyverno.io/category: Best Practices in CEL + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Service + policies.kyverno.io/description: >- + A Kubernetes Service of type NodePort uses a host port to receive traffic from + any source. A NetworkPolicy cannot be used to control traffic to host ports. + Although NodePort Services can be useful, their use must be limited to Services + with additional upstream security checks. This policy validates that any new Services + do not use the `NodePort` type. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-nodeport + match: + any: + - resources: + kinds: + - Service + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.type) ? (object.spec.type != 'NodePort') : true" + message: "Services of type NodePort are not allowed." + + +``` diff --git a/content/en/policies/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.md b/content/en/policies/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.md new file mode 100644 index 000000000..66a96b00d --- /dev/null +++ b/content/en/policies/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.md @@ -0,0 +1,55 @@ +--- +title: "Restrict External IPs in CEL expressions" +category: Best Practices in CEL +version: 1.11.0 +subject: Service +policyType: "validate" +description: > + Service externalIPs can be used for a MITM attack (CVE-2020-8554). Restrict externalIPs or limit to a known set of addresses. See: https://github.com/kyverno/kyverno/issues/1367. 
This policy validates that the `externalIPs` field is not set on a Service. +--- + +## Policy Definition +/best-practices-cel/restrict-service-external-ips/restrict-service-external-ips.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-external-ips + annotations: + policies.kyverno.io/title: Restrict External IPs in CEL expressions + policies.kyverno.io/category: Best Practices in CEL + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Service + policies.kyverno.io/description: >- + Service externalIPs can be used for a MITM attack (CVE-2020-8554). + Restrict externalIPs or limit to a known set of addresses. + See: https://github.com/kyverno/kyverno/issues/1367. This policy validates + that the `externalIPs` field is not set on a Service. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-ips + match: + any: + - resources: + kinds: + - Service + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.spec.externalIPs)" + # restrict external IP addresses + # you can alternatively restrict to a known set of addresses using: + # !has(object.spec.externalIPs) || + # object.spec.externalIPs.all(ip, ip in ["37.10.11.53", "153.10.20.1"]) + message: "externalIPs are not allowed." + + +``` diff --git a/content/en/policies/best-practices/disallow-empty-ingress-host/disallow-empty-ingress-host.md b/content/en/policies/best-practices/disallow-empty-ingress-host/disallow-empty-ingress-host.md index e06cc922e..c70cdde81 100644 --- a/content/en/policies/best-practices/disallow-empty-ingress-host/disallow-empty-ingress-host.md +++ b/content/en/policies/best-practices/disallow-empty-ingress-host/disallow-empty-ingress-host.md @@ -27,7 +27,7 @@ metadata: in order to be valid. This policy ensures that there is a hostname for each rule defined. spec: - validationFailureAction: enforce + validationFailureAction: Audit background: false rules: - name: disallow-empty-ingress-host diff --git a/content/en/policies/best-practices/require-pod-requests-limits/require-pod-requests-limits.md b/content/en/policies/best-practices/require-pod-requests-limits/require-pod-requests-limits.md index 09a8c83c0..09b2e1fdc 100644 --- a/content/en/policies/best-practices/require-pod-requests-limits/require-pod-requests-limits.md +++ b/content/en/policies/best-practices/require-pod-requests-limits/require-pod-requests-limits.md @@ -50,4 +50,18 @@ spec: cpu: "?*" limits: memory: "?*" + initContainers: + - resources: + requests: + memory: "?*" + cpu: "?*" + limits: + memory: "?*" + ephemeralContainers: + - resources: + requests: + memory: "?*" + cpu: "?*" + limits: + memory: "?*" ``` diff --git a/content/en/policies/cleanup/cleanup-empty-replicasets/cleanup-empty-replicasets.md b/content/en/policies/cleanup/cleanup-empty-replicasets/cleanup-empty-replicasets.md index 35946873e..72fafb88e 100644 --- a/content/en/policies/cleanup/cleanup-empty-replicasets/cleanup-empty-replicasets.md +++ b/content/en/policies/cleanup/cleanup-empty-replicasets/cleanup-empty-replicasets.md @@ -5,13 +5,14 @@ version: 1.9.0 subject: ReplicaSet policyType: "cleanUp" description: > - ReplicaSets are an intermediary controller to several Pod controllers such as Deployments. When a new version of a Deployment is created, it spawns a new ReplicaSet with the desired number of replicas and scale the current one to zero. 
This can have the effect of leaving many empty ReplicaSets in the cluster which can create clutter and false positives if policy reports are enabled. This cleanup policy removes all empty ReplicaSets across the cluster. Note that removing empty ReplicaSets may prevent rollbacks. + ReplicaSets serve as an intermediate controller for various Pod controllers like Deployments. When a new version of a Deployment is initiated, it generates a new ReplicaSet with the specified number of replicas and scales down the current one to zero. Consequently, numerous empty ReplicaSets may accumulate in the cluster, leading to clutter and potential false positives in policy reports if enabled. This cleanup policy is designed to remove empty ReplicaSets across the cluster after a specified timeframe, for instance, ReplicaSets created more than one day ago, preserving the ability to roll back to recent ReplicaSets in case of deployment issues. --- ## Policy Definition /cleanup/cleanup-empty-replicasets/cleanup-empty-replicasets.yaml ```yaml +# The described logic currently deletes empty ReplicaSets created more than 30 seconds ago. You can adjust this timeframe according to your specific requirements. apiVersion: kyverno.io/v2beta1 kind: ClusterCleanupPolicy metadata: @@ -25,23 +26,26 @@ metadata: policies.kyverno.io/minversion: 1.9.0 kyverno.io/kubernetes-version: "1.27" policies.kyverno.io/description: >- - ReplicaSets are an intermediary controller to several Pod controllers such as Deployments. - When a new version of a Deployment is created, it spawns a new ReplicaSet with the desired - number of replicas and scale the current one to zero. This can have the effect of leaving - many empty ReplicaSets in the cluster which can create clutter and false positives if policy - reports are enabled. This cleanup policy removes all empty ReplicaSets across the cluster. - Note that removing empty ReplicaSets may prevent rollbacks. + ReplicaSets serve as an intermediate controller for various Pod controllers like Deployments. When a new version of a Deployment is initiated, it generates a new ReplicaSet with the specified number of replicas and scales down the current one to zero. Consequently, numerous empty ReplicaSets may accumulate in the cluster, leading to clutter and potential false positives in policy reports if enabled.
This cleanup policy is designed to remove empty ReplicaSets across the cluster after a specified timeframe, for instance, ReplicaSets created more than one day ago, preserving the ability to roll back to recent ReplicaSets in case of deployment issues. spec: match: any: - resources: kinds: - ReplicaSet + exclude: + any: + - resources: + namespaces: + - kube-system conditions: all: - key: "{{ target.spec.replicas }}" operator: Equals value: 0 - schedule: "*/5 * * * *" + - key: "{{ time_diff('{{target.metadata.creationTimestamp}}','{{ time_now_utc() }}') }}" + operator: GreaterThan + value: "0h0m30s" + schedule: "*/1 * * * *" ``` diff --git a/content/en/policies/consul-cel/enforce-min-tls-version/enforce-min-tls-version.md b/content/en/policies/consul-cel/enforce-min-tls-version/enforce-min-tls-version.md new file mode 100644 index 000000000..814ab21e8 --- /dev/null +++ b/content/en/policies/consul-cel/enforce-min-tls-version/enforce-min-tls-version.md @@ -0,0 +1,51 @@ +--- +title: "Enforce Consul min TLS version in CEL expressions" +category: Consul in CEL +version: 1.11.0 +subject: Mesh +policyType: "validate" +description: > + This policy checks the minimum TLS version to ensure that, whenever the Mesh is configured, a minimum version of TLS is set for all the service mesh proxies, enforcing that service mesh mTLS traffic uses TLS v1.2 or newer. +--- + +## Policy Definition +/consul-cel/enforce-min-tls-version/enforce-min-tls-version.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-min-tls-version + annotations: + policies.kyverno.io/title: Enforce Consul min TLS version in CEL expressions + policies.kyverno.io/category: Consul in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Mesh + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + This policy checks the minimum TLS version to ensure that, whenever the Mesh is configured, a minimum version of TLS is set for all the service mesh proxies, enforcing that service mesh mTLS traffic uses TLS v1.2 or newer. +spec: + validationFailureAction: Enforce + background: true + rules: + - name: check-for-tls-version + match: + any: + - resources: + kinds: + - Mesh + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.spec) && has(object.spec.tls) && has(object.spec.tls.incoming) && + has(object.spec.tls.incoming.tlsMinVersion) && object.spec.tls.incoming.tlsMinVersion == 'TLSv1_2' + message: The minimum TLS version must be set to TLSv1_2 + + +``` diff --git a/content/en/policies/flux-cel/verify-flux-sources/verify-flux-sources.md b/content/en/policies/flux-cel/verify-flux-sources/verify-flux-sources.md new file mode 100644 index 000000000..76bed366d --- /dev/null +++ b/content/en/policies/flux-cel/verify-flux-sources/verify-flux-sources.md @@ -0,0 +1,115 @@ +--- +title: "Verify Flux Sources in CEL expressions" +category: Flux in CEL +version: 1.11.0 +subject: GitRepository, Bucket, HelmRepository, ImageRepository +policyType: "validate" +description: > + Flux source APIs include a number of different sources such as GitRepository, Bucket, HelmRepository, and ImageRepository resources. Each of these by default can be pointed to any location. In a production environment, it may be desired to restrict these to only known sources to prevent accessing outside sources.
This policy verifies that each of the Flux sources comes from a trusted location. +--- + +## Policy Definition +/flux-cel/verify-flux-sources/verify-flux-sources.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: verify-flux-sources + annotations: + policies.kyverno.io/title: Verify Flux Sources in CEL expressions + policies.kyverno.io/category: Flux in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: GitRepository, Bucket, HelmRepository, ImageRepository + policies.kyverno.io/description: >- + Flux source APIs include a number of different sources such as + GitRepository, Bucket, HelmRepository, and ImageRepository resources. Each of these + by default can be pointed to any location. In a production environment, + it may be desired to restrict these to only known sources to prevent + accessing outside sources. This policy verifies that each of the Flux + sources comes from a trusted location. +spec: + validationFailureAction: Audit + rules: + - name: flux-github-repositories + match: + any: + - resources: + kinds: + - GitRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "object.spec.url.startsWith('https://github.com/myorg/') || object.spec.url.startsWith('ssh://git@github.com:myorg/')" + message: ".spec.url must be from a repository within the myorg organization." + - name: flux-buckets + match: + any: + - resources: + kinds: + - Bucket + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "has(object.spec.endpoint) && object.spec.endpoint.endsWith('.myorg.com')" + message: ".spec.endpoint must reference an address within the myorg organization." + - name: flux-helm-repositories + match: + any: + - resources: + kinds: + - HelmRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "object.spec.url.matches('^https://[a-zA-Z0-9-]+[.]myorg[.]com/.*$')" + message: ".spec.url must be from a repository within the myorg organization." + - name: flux-image-repositories + match: + any: + - resources: + kinds: + - ImageRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "has(object.spec.image) && object.spec.image.startsWith('ghcr.io/myorg/')" + message: ".spec.image must be from an image repository within the myorg organization." + + +``` diff --git a/content/en/policies/flux-cel/verify-git-repositories/verify-git-repositories.md b/content/en/policies/flux-cel/verify-git-repositories/verify-git-repositories.md new file mode 100644 index 000000000..70c05ecb5 --- /dev/null +++ b/content/en/policies/flux-cel/verify-git-repositories/verify-git-repositories.md @@ -0,0 +1,55 @@ +--- +title: "Verify Git Repositories in CEL expressions" +category: Flux in CEL +version: 1.11.0 +subject: GitRepository +policyType: "validate" +description: > + Ensures that Git repositories used for Flux deployments in a cluster originate from a specific, trusted organization. Prevents the use of untrusted or potentially risky Git repositories. Protects the integrity and security of Flux deployments. 
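+# Editorial sketch: with the expression in the rule below, a URL such as
+# https://github.com/fluxcd/flux2 passes, while a repository under any other
+# organization (for example https://github.com/example-org/app) is flagged.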
+--- + +## Policy Definition +/flux-cel/verify-git-repositories/verify-git-repositories.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: verify-git-repositories + annotations: + policies.kyverno.io/title: Verify Git Repositories in CEL expressions + policies.kyverno.io/category: Flux in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: GitRepository + policies.kyverno.io/description: >- + Ensures that Git repositories used for Flux deployments + in a cluster originate from a specific, trusted organization. + Prevents the use of untrusted or potentially risky Git repositories. + Protects the integrity and security of Flux deployments. +spec: + validationFailureAction: Audit + rules: + - name: github-repositories-only + match: + any: + - resources: + kinds: + - GitRepository + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + namespaces: + - flux-system + validate: + cel: + expressions: + - expression: "object.spec.url.startsWith('https://github.com/fluxcd/') || object.spec.url.startsWith('ssh://git@github.com:fluxcd/')" + message: .spec.url must be from a repository within the organisation X + + +``` diff --git a/content/en/policies/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.md b/content/en/policies/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.md new file mode 100644 index 000000000..f35e5062d --- /dev/null +++ b/content/en/policies/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.md @@ -0,0 +1,50 @@ +--- +title: "Enforce Istio Sidecar Injection in CEL expressions" +category: Istio in CEL +version: 1.11.0 +subject: Namespace +policyType: "validate" +description: > + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces set `istio-injection` to `enabled`. +--- + +## Policy Definition +/istio-cel/enforce-sidecar-injection-namespace/enforce-sidecar-injection-namespace.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-sidecar-injection-namespace + annotations: + policies.kyverno.io/title: Enforce Istio Sidecar Injection in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Namespace + policies.kyverno.io/description: >- + In order for Istio to inject sidecars to workloads deployed into Namespaces, the label + `istio-injection` must be set to `enabled`. This policy ensures that all new Namespaces + set `istio-injection` to `enabled`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-istio-injection-enabled + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && 'istio-injection' in object.metadata.labels && object.metadata.labels['istio-injection'] == 'enabled'" + message: "All new Namespaces must have Istio sidecar injection enabled."
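+        # Illustrative only: a hypothetical Namespace that satisfies this check:
+        #
+        #   apiVersion: v1
+        #   kind: Namespace
+        #   metadata:
+        #     name: team-a
+        #     labels:
+        #       istio-injection: enabled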
+ + +``` diff --git a/content/en/policies/istio-cel/enforce-strict-mtls/enforce-strict-mtls.md b/content/en/policies/istio-cel/enforce-strict-mtls/enforce-strict-mtls.md new file mode 100644 index 000000000..50bec1514 --- /dev/null +++ b/content/en/policies/istio-cel/enforce-strict-mtls/enforce-strict-mtls.md @@ -0,0 +1,56 @@ +--- +title: "Enforce Istio Strict mTLS in CEL expressions" +category: Istio in CEL +version: 1.11.0 +subject: PeerAuthentication +policyType: "validate" +description: > + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on the `istio-system` Namespace, could disable it across the entire mesh. Disabling mTLS can reduce the security for traffic within that portion of the mesh and should be controlled. This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring the `mode` be set to either `UNSET` or `STRICT`. +--- + +## Policy Definition +/istio-cel/enforce-strict-mtls/enforce-strict-mtls.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: enforce-strict-mtls + annotations: + policies.kyverno.io/title: Enforce Istio Strict mTLS in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: PeerAuthentication + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Strict mTLS requires that mutual TLS be enabled across the entire service mesh, which + can be set using a PeerAuthentication resource on a per-Namespace basis and, if set on + the `istio-system` Namespace, could disable it across the entire mesh. Disabling mTLS + can reduce the security for traffic within that portion of the mesh and should be controlled. + This policy prevents disabling strict mTLS in a PeerAuthentication resource by requiring + the `mode` be set to either `UNSET` or `STRICT`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-mtls + match: + any: + - resources: + kinds: + - PeerAuthentication + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec) || !has(object.spec.mtls) || !has(object.spec.mtls.mode) || + object.spec.mtls.mode in ['UNSET', 'STRICT'] + message: "PeerAuthentication resources may only set UNSET or STRICT for the mode." + + +``` diff --git a/content/en/policies/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.md b/content/en/policies/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.md new file mode 100644 index 000000000..8fff8aaa6 --- /dev/null +++ b/content/en/policies/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.md @@ -0,0 +1,54 @@ +--- +title: "Prevent Disabling Istio Sidecar Injection in CEL expressions" +category: Istio in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + One way to accomplish sidecar injection in an Istio service mesh is by defining an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh thereby reducing visibility. This policy ensures that Pods cannot set the annotation `sidecar.istio.io/inject` to a value of `false`.
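+# Editorial sketch: on a hypothetical Pod, the annotation this policy flags looks
+# like the following; any other value (or omitting the annotation) passes:
+#   metadata:
+#     annotations:
+#       sidecar.istio.io/inject: "false"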
+--- + +## Policy Definition +/istio-cel/prevent-disabling-injection-pods/prevent-disabling-injection-pods.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-disabling-injection-pods + annotations: + policies.kyverno.io/title: Prevent Disabling Istio Sidecar Injection in CEL expressions + policies.kyverno.io/category: Istio in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + One way to accomplish sidecar injection in an Istio service mesh is by defining + an annotation at the Pod level. Pods not receiving a sidecar cannot participate in the mesh + thereby reducing visibility. This policy ensures that Pods cannot set the annotation + `sidecar.istio.io/inject` to a value of `false`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: prohibit-inject-annotation + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.metadata.annotations) || !('sidecar.istio.io/inject' in object.metadata.annotations) || + object.metadata.annotations['sidecar.istio.io/inject'] != 'false' + message: "Pods may not disable sidecar injection by setting the annotation sidecar.istio.io/inject to a value of false." + + +``` diff --git a/content/en/policies/istio/service-mesh-disallow-capabilities/service-mesh-disallow-capabilities.md b/content/en/policies/istio/service-mesh-disallow-capabilities/service-mesh-disallow-capabilities.md new file mode 100644 index 000000000..4aa00a510 --- /dev/null +++ b/content/en/policies/istio/service-mesh-disallow-capabilities/service-mesh-disallow-capabilities.md @@ -0,0 +1,101 @@ +--- +title: "Service Mesh Disallow Capabilities" +category: Istio, Linkerd, Pod Security Standards (Baseline) +version: +subject: Pod +policyType: "validate" +description: > + This policy is a variation of the disallow-capabilities policy that is a part of the Pod Security Standards (Baseline) category. It enforces the same control but with provisions for common service mesh initContainers from Istio and Linkerd which need the additional capabilities, NET_ADMIN and NET_RAW. For more information and context, see the Kyverno blog post at https://kyverno.io/blog/2024/02/04/securing-services-meshes-easier-with-kyverno/. +--- + +## Policy Definition +/istio/service-mesh-disallow-capabilities/service-mesh-disallow-capabilities.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: service-mesh-disallow-capabilities + annotations: + policies.kyverno.io/title: Service Mesh Disallow Capabilities + policies.kyverno.io/category: Istio, Linkerd, Pod Security Standards (Baseline) + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.12.3 + kyverno.io/kubernetes-version: "1.28" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + This policy is a variation of the disallow-capabilities policy that is a part of the + Pod Security Standards (Baseline) category. It enforces the same control but with + provisions for common service mesh initContainers from Istio and Linkerd which need + the additional capabilities, NET_ADMIN and NET_RAW. For more information and context, + see the Kyverno blog post at https://kyverno.io/blog/2024/02/04/securing-services-meshes-easier-with-kyverno/.
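+# Illustrative only (hypothetical image tag): the kind of service mesh initContainer
+# the first rule below exempts, since its image matches */istio/proxyv2*:
+#   initContainers:
+#   - name: istio-init
+#     image: docker.io/istio/proxyv2:1.20.0
+#     securityContext:
+#       capabilities:
+#         add: ["NET_ADMIN", "NET_RAW"]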
+spec: + validationFailureAction: Audit + background: true + rules: + - name: adding-capabilities-istio-linkerd + match: + any: + - resources: + kinds: + - Pod + preconditions: + all: + - key: "{{ request.operation || 'BACKGROUND' }}" + operator: NotEquals + value: DELETE + context: + - name: capabilities + variable: + value: ["AUDIT_WRITE","CHOWN","DAC_OVERRIDE","FOWNER","FSETID","KILL","MKNOD","NET_BIND_SERVICE","SETFCAP","SETGID","SETPCAP","SETUID","SYS_CHROOT"] + validate: + message: >- + Any capabilities added beyond the allowed list (AUDIT_WRITE, CHOWN, DAC_OVERRIDE, FOWNER, + FSETID, KILL, MKNOD, NET_BIND_SERVICE, SETFCAP, SETGID, SETPCAP, SETUID, SYS_CHROOT) + are disallowed. Service mesh initContainers may additionally add NET_ADMIN and NET_RAW. + foreach: + - list: request.object.spec.initContainers[] + preconditions: + all: + - key: "{{ element.image }}" + operator: AnyIn + value: + - "*/istio/proxyv2*" + - "*/linkerd/proxy-init*" + - key: "{{ element.securityContext.capabilities.add[] || `[]` }}" + operator: AnyNotIn + value: + - NET_ADMIN + - NET_RAW + - "{{ capabilities }}" + deny: + conditions: + all: + - key: "{{ element.securityContext.capabilities.add[] || `[]` }}" + operator: AnyNotIn + value: "{{ capabilities }}" + message: The service mesh initContainer {{ element.name }} is attempting to add forbidden capabilities. + - list: request.object.spec.initContainers[] + preconditions: + all: + - key: "{{ element.image }}" + operator: AnyNotIn + value: + - "*/istio/proxyv2*" + - "*/linkerd/proxy-init*" + deny: + conditions: + all: + - key: "{{ element.securityContext.capabilities.add[] || `[]` }}" + operator: AnyNotIn + value: "{{ capabilities }}" + message: The initContainer {{ element.name }} is attempting to add forbidden capabilities. + - list: request.object.spec.[ephemeralContainers, containers][] + deny: + conditions: + all: + - key: "{{ element.securityContext.capabilities.add[] || `[]` }}" + operator: AnyNotIn + value: "{{ capabilities }}" + message: The container {{ element.name }} is attempting to add forbidden capabilities. +``` diff --git a/content/en/policies/istio/service-mesh-require-run-as-nonroot/service-mesh-require-run-as-nonroot.md b/content/en/policies/istio/service-mesh-require-run-as-nonroot/service-mesh-require-run-as-nonroot.md new file mode 100644 index 000000000..3e1a6b6d5 --- /dev/null +++ b/content/en/policies/istio/service-mesh-require-run-as-nonroot/service-mesh-require-run-as-nonroot.md @@ -0,0 +1,73 @@ +--- +title: "Service Mesh Require runAsNonRoot" +category: Istio, Pod Security Standards (Restricted) +version: +subject: Pod +policyType: "validate" +description: > + This policy is a variation of the Require runAsNonRoot policy that is a part of the Pod Security Standards (Restricted) category. It enforces the same control but with provisions for Istio's initContainer. For more information and context, see the Kyverno blog post at https://kyverno.io/blog/2024/02/04/securing-services-meshes-easier-with-kyverno/. 
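+# Editorial sketch of how the anyPattern below is satisfied: either the Pod sets
+# spec.securityContext.runAsNonRoot: true (containers may then omit the field), or
+# every container sets it individually; initContainers whose image matches
+# *istio/proxyv2* are exempted in both patterns.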
+--- + +## Policy Definition +/istio/service-mesh-require-run-as-nonroot/service-mesh-require-run-as-nonroot.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: service-mesh-require-run-as-nonroot + annotations: + policies.kyverno.io/title: Service Mesh Require runAsNonRoot + policies.kyverno.io/category: Istio, Pod Security Standards (Restricted) + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.12.3 + kyverno.io/kubernetes-version: "1.28" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + This policy is a variation of the Require runAsNonRoot policy that is a part of the + Pod Security Standards (Restricted) category. It enforces the same control but with + provisions for Istio's initContainer. For more information and context, + see the Kyverno blog post at https://kyverno.io/blog/2024/02/04/securing-services-meshes-easier-with-kyverno/. +spec: + validationFailureAction: Audit + background: true + rules: + - name: run-as-non-root-istio + match: + any: + - resources: + kinds: + - Pod + validate: + message: >- + Running as root is not allowed. Either the field spec.securityContext.runAsNonRoot + must be set to `true`, or the fields spec.containers[*].securityContext.runAsNonRoot, + spec.initContainers[*].securityContext.runAsNonRoot, and spec.ephemeralContainers[*].securityContext.runAsNonRoot + must be set to `true`. + anyPattern: + - spec: + securityContext: + runAsNonRoot: true + =(ephemeralContainers): + - =(securityContext): + =(runAsNonRoot): true + =(initContainers): + - (image): "!*istio/proxyv2*" + =(securityContext): + =(runAsNonRoot): true + containers: + - =(securityContext): + =(runAsNonRoot): true + - spec: + =(ephemeralContainers): + - securityContext: + runAsNonRoot: true + =(initContainers): + - (image): "!*istio/proxyv2*" + securityContext: + runAsNonRoot: true + containers: + - securityContext: + runAsNonRoot: true + +``` diff --git a/content/en/policies/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.md b/content/en/policies/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.md new file mode 100644 index 000000000..b7afbb5c2 --- /dev/null +++ b/content/en/policies/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.md @@ -0,0 +1,52 @@ +--- +title: "Check Data Protection By Label in CEL expressions" +category: Kasten K10 by Veeam in CEL +version: 1.11.0 +subject: Deployment, StatefulSet +policyType: "validate" +description: > + Check via the 'dataprotection' label that production Deployments and StatefulSets have a named K10 Policy. Use in combination with a 'generate' ClusterPolicy to generate a specific K10 Policy by name. +--- + +## Policy Definition +/kasten-cel/k10-data-protection-by-label/k10-data-protection-by-label.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-data-protection-by-label + annotations: + policies.kyverno.io/title: Check Data Protection By Label in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Deployment, StatefulSet + policies.kyverno.io/description: >- + Check via the 'dataprotection' label that production Deployments and StatefulSets have a named K10 Policy. + Use in combination with a 'generate' ClusterPolicy to generate a specific K10 Policy by name.
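+# Illustrative only: label sketch for a hypothetical production Deployment that
+# this rule would accept:
+#   metadata:
+#     labels:
+#       purpose: production              # selects the workload for this rule
+#       dataprotection: k10-goldpolicy   # must start with 'k10-'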
+spec: + validationFailureAction: Audit + rules: + - name: k10-data-protection-by-label + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + selector: + matchLabels: + purpose: production + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && has(object.metadata.labels.dataprotection) && object.metadata.labels.dataprotection.startsWith('k10-')" + message: "Deployments and StatefulSets that specify the 'dataprotection' label must have a valid k10-?* name (use labels: dataprotection: k10-)" + + +``` diff --git a/content/en/policies/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.md b/content/en/policies/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.md new file mode 100644 index 000000000..5b08da656 --- /dev/null +++ b/content/en/policies/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.md @@ -0,0 +1,51 @@ +--- +title: "Check Hourly RPO in CEL expressions" +category: Kasten K10 by Veeam in CEL +version: 1.11.0 +subject: Policy +policyType: "validate" +description: > + K10 Policy resources can be validated to adhere to common Recovery Point Objective (RPO) best practices. This policy advises using an RPO frequency with hourly granularity if the policy has the label appPriority: Mission-Critical +--- + +## Policy Definition +/kasten-cel/k10-hourly-rpo/k10-hourly-rpo.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-policy-hourly-rpo + annotations: + policies.kyverno.io/title: Check Hourly RPO in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Policy + policies.kyverno.io/description: >- + K10 Policy resources can be validated to adhere to common Recovery Point Objective (RPO) best practices. + This policy advises using an RPO frequency with hourly granularity if the policy has the label appPriority: Mission-Critical +spec: + validationFailureAction: Audit + rules: + - name: k10-policy-hourly-rpo + match: + any: + - resources: + kinds: + - config.kio.kasten.io/v1alpha1/Policy + operations: + - CREATE + - UPDATE + selector: + matchLabels: + appPriority: Mission-Critical + validate: + cel: + expressions: + - expression: "has(object.spec.frequency) && object.spec.frequency == '@hourly'" + message: "Mission Critical policies should use an @hourly RPO frequency" + + +``` diff --git a/content/en/policies/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.md b/content/en/policies/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.md new file mode 100644 index 000000000..c117090d4 --- /dev/null +++ b/content/en/policies/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.md @@ -0,0 +1,58 @@ +--- +title: "Validate Data Protection by Preset Label in CEL expressions" +category: Kasten K10 by Veeam in CEL +version: 1.11.0 +subject: Namespace +policyType: "validate" +description: > + Kubernetes applications are typically deployed into a single, logical namespace. Kasten K10 policies will discover and protect all resources within the selected namespace(s). This policy ensures all new namespaces include a label referencing a valid K10 SLA (Policy Preset) for data protection. This policy can be used in combination with a generate ClusterPolicy to automatically create a K10 policy based on the specified SLA.
The combination ensures that new applications are not inadvertently left unprotected. +--- + +## Policy Definition +/kasten-cel/k10-validate-ns-by-preset-label/k10-validate-ns-by-preset-label.yaml + +```yaml +#NOTE: This example assumes that K10 policy presets named "gold", "silver", and "bronze" have been pre-created and K10 was deployed into the `kasten-io` namespace. +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: k10-validate-ns-by-preset-label + annotations: + policies.kyverno.io/title: Validate Data Protection by Preset Label in CEL expressions + policies.kyverno.io/category: Kasten K10 by Veeam in CEL + policies.kyverno.io/subject: Namespace + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Kubernetes applications are typically deployed into a single, logical namespace. + Kasten K10 policies will discover and protect all resources within the selected namespace(s). + This policy ensures all new namespaces include a label referencing a valid K10 SLA + (Policy Preset) for data protection. This policy can be used in combination with a generate + ClusterPolicy to automatically create a K10 policy based on the specified SLA. + The combination ensures that new applications are not inadvertently left unprotected. +spec: + validationFailureAction: Audit + rules: + - name: k10-validate-ns-by-preset-label + match: + any: + - resources: + kinds: + - Namespace + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.metadata.labels) && has(object.metadata.labels.dataprotection) && object.metadata.labels.dataprotection in ['gold', 'silver', 'bronze', 'none']" + message: >- + Namespaces must specify a "dataprotection" label with a value corresponding to a Kasten K10 SLA: + + "gold" - + "silver" - + "bronze" - + "none" - No local snapshots or backups + +``` diff --git a/content/en/policies/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.md b/content/en/policies/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.md deleted file mode 100644 index 2d5f89298..000000000 --- a/content/en/policies/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "Generate Gold Backup Policy" -category: Kasten K10 by Veeam -version: 1.6.2 -subject: Policy -policyType: "generate" -description: > - Generate a backup policy for any Deployment or StatefulSet that adds the labels "dataprotection: k10-goldpolicy" This policy works best to decide the data protection objectives and simply assign backup via application labels. ---- - -## Policy Definition -/kasten/k10-generate-gold-backup-policy/k10-generate-gold-backup-policy.yaml - -```yaml -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - name: k10-generate-gold-backup-policy - annotations: - policies.kyverno.io/title: Generate Gold Backup Policy - policies.kyverno.io/category: Kasten K10 by Veeam - kyverno.io/kyverno-version: 1.6.2 - policies.kyverno.io/minversion: 1.6.2 - kyverno.io/kubernetes-version: "1.21-1.22" - policies.kyverno.io/subject: Policy - policies.kyverno.io/description: >- - Generate a backup policy for any Deployment or StatefulSet that adds the labels "dataprotection: k10-goldpolicy" - This policy works best to decide the data protection objectives and simply assign backup via application labels.
-spec: - background: false - rules: - - name: k10-generate-gold-backup-policy - match: - any: - - resources: - kinds: - - Deployment - - StatefulSet - selector: - matchLabels: - dataprotection: k10-goldpolicy # match with a corresponding ClusterPolicy that checks for this label - generate: - apiVersion: config.kio.kasten.io/v1alpha1 - kind: Policy - name: k10-{{request.namespace}}-gold-backup-policy - namespace: "{{request.namespace}}" - data: - metadata: - name: k10-{{request.namespace}}-gold-backup-policy - namespace: "{{request.namespace}}" - spec: - comment: K10 "gold" immutable production backup policy - frequency: '@daily' - retention: - daily: 7 - weekly: 4 - monthly: 12 - yearly: 7 - actions: - - action: backup - - action: export - exportParameters: - frequency: '@monthly' - profile: - name: object-lock-s3 - namespace: kasten-io - exportData: - enabled: true - retention: - monthly: 12 - yearly: 5 - selector: - matchExpressions: - - key: k10.kasten.io/appNamespace - operator: In - values: - - "{{request.namespace}}" - -``` diff --git a/content/en/policies/kasten/k10-minimum-retention/k10-minimum-retention.md b/content/en/policies/kasten/k10-minimum-retention/k10-minimum-retention.md deleted file mode 100644 index 954cf0442..000000000 --- a/content/en/policies/kasten/k10-minimum-retention/k10-minimum-retention.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "Minimum Backup Retention" -category: Kasten K10 by Veeam -version: 1.6.2 -subject: Policy -policyType: "mutate" -description: > - K10 Policy resources can be validated to adhere to common compliance retention standards. Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. This policy deletes the retention value in the backup operation and replaces it with the specified retention. Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. To use different This policy can also be used go reduce retentions lengths to enforce cost optimization. ---- - -## Policy Definition -/kasten/k10-minimum-retention/k10-minimum-retention.yaml - -```yaml -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - name: k10-minimum-retention - annotations: - policies.kyverno.io/title: Minimum Backup Retention - policies.kyverno.io/category: Kasten K10 by Veeam - kyverno.io/kyverno-version: 1.6.2 - policies.kyverno.io/minversion: 1.6.2 - kyverno.io/kubernetes-version: "1.21-1.22" - policies.kyverno.io/subject: Policy - policies.kyverno.io/description: >- - K10 Policy resources can be validated to adhere to common compliance retention standards. - Uncomment the regulation/compliance standards you want to enforce for according to GFS retention. - This policy deletes the retention value in the backup operation and replaces it with the specified retention. - Note: K10 Policy uses the GFS retention scheme and export operations default to use the retention of the backup operation. - To use different - This policy can also be used go reduce retentions lengths to enforce cost optimization. 
-spec: - schemaValidation: false - rules: - - name: k10-minimum-retention - match: - any: - - resources: - kinds: - - config.kio.kasten.io/v1alpha1/Policy - mutate: - # Federal Information Security Management Act (FISMA): 3 Years - #patchesJson6902: |- - # - path: "/spec/retention" - # op: replace - # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":3} - - # Health Insurance Portability and Accountability Act (HIPAA): 6 Years - #patchesJson6902: |- - # - path: "/spec/retention" - # op: replace - # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":6} - - # National Energy Commission (NERC): 3 to 6 Years - #patchesJson6902: |- - # - path: "/spec/retention" - # op: replace - # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":3} - - # Basel II Capital Accord: 3 to 7 Years - #patchesJson6902: |- - # - path: "/spec/retention" - # op: replace - # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":3} - - # Sarbanes-Oxley Act of 2002 (SOX): 7 Years - #patchesJson6902: |- - # - path: "/spec/retention" - # op: replace - # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":7} - - # National Industrial Security Program Operating Manual (NISPOM): 6 to 12 Months - #patchesJson6902: |- - # - path: "/spec/retention" - # op: replace - # value: {"hourly":24,"daily":30,"weekly":4,"monthly":6} - - # Cost Optimization (Maximum Retention: 3 Months) - patchesJson6902: |- - - path: "/spec/retention" - op: replace - value: - hourly: 24 - daily: 30 - weekly: 4 - monthly: 3 - -``` diff --git a/content/en/policies/kasten/kasten-3-2-1-backup/kasten-3-2-1-backup.md b/content/en/policies/kasten/kasten-3-2-1-backup/kasten-3-2-1-backup.md index b64baceae..6d05f7978 100644 --- a/content/en/policies/kasten/kasten-3-2-1-backup/kasten-3-2-1-backup.md +++ b/content/en/policies/kasten/kasten-3-2-1-backup/kasten-3-2-1-backup.md @@ -5,8 +5,7 @@ version: 1.12.0 subject: Policy policyType: "validate" description: > - The 3-2-1 rule of data protection recommends that you have at least 3 copies of data, on 2 different storage targets, with 1 being offsite. This approach ensures a health mix of redundancy options for data recovery of the application for localized & multi-region cloud failures or compromise. In Kubernetes, this translates to the original running resources, a local snapshot, and a copy of all application resources and volume data exported to an external repository. - This policy accomplishes 3-2-1 validation by ensuring each policy contains both 'action: backup' and 'action: export'. + The 3-2-1 rule of data protection recommends that you have at least 3 copies of data, on 2 different storage targets, with 1 being offsite. This approach ensures a health mix of redundancy options for data recovery of the application for localized & multi-region cloud failures or compromise. In Kubernetes, this translates to the original running resources, a local snapshot, and a copy of all application resources and volume data exported to an external repository. This policy accomplishes 3-2-1 validation by ensuring each policy contains both 'action: backup' and 'action: export'. --- ## Policy Definition @@ -27,7 +26,6 @@ metadata: policies.kyverno.io/subject: Policy policies.kyverno.io/description: >- The 3-2-1 rule of data protection recommends that you have at least 3 copies of data, on 2 different storage targets, with 1 being offsite. 
      This approach ensures a health mix of redundancy options for data recovery of the application for localized & multi-region cloud failures or compromise. In Kubernetes, this translates to the original running resources, a local snapshot, and a copy of all application resources and volume data exported to an external repository.
-      This policy accomplishes 3-2-1 validation by ensuring each policy contains both 'action: backup' and 'action: export'.
 spec:
   validationFailureAction: Audit
diff --git a/content/en/policies/kasten/kasten-data-protection-by-label/kasten-data-protection-by-label.md b/content/en/policies/kasten/kasten-data-protection-by-label/kasten-data-protection-by-label.md
index 136378ff8..206ab1beb 100644
--- a/content/en/policies/kasten/kasten-data-protection-by-label/kasten-data-protection-by-label.md
+++ b/content/en/policies/kasten/kasten-data-protection-by-label/kasten-data-protection-by-label.md
@@ -5,8 +5,7 @@ version: 1.6.2
 subject: Deployment, StatefulSet
 policyType: "validate"
 description: >
-    Check the 'dataprotection' label for production Deployments and StatefulSet workloads.
-    Use in combination with 'kasten-generate-example-backup-policy' policy to generate a Kasten policy for the workload namespace, if it doesn't already exist.
+    Check the 'dataprotection' label for production Deployments and StatefulSet workloads. Use in combination with 'kasten-generate-example-backup-policy' policy to generate a Kasten policy for the workload namespace, if it doesn't already exist.
 ---
 
 ## Policy Definition
@@ -26,7 +25,6 @@ metadata:
     policies.kyverno.io/subject: Deployment, StatefulSet
     policies.kyverno.io/description: >-
       Check the 'dataprotection' label for production Deployments and StatefulSet workloads.
-      Use in combination with 'kasten-generate-example-backup-policy' policy to generate a Kasten policy for the workload namespace, if it doesn't already exist.
 spec:
   validationFailureAction: Audit
diff --git a/content/en/policies/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.md b/content/en/policies/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.md
new file mode 100644
index 000000000..15e46769e
--- /dev/null
+++ b/content/en/policies/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.md
@@ -0,0 +1,113 @@
+---
+title: "Generate Kasten Backup Policy Based on Resource Label"
+category: Veeam Kasten
+version: 1.12.0
+subject: Policy
+policyType: "generate"
+description: >
+    Generates a Kasten policy for a namespace that includes any Deployment or StatefulSet with a "dataprotection=kasten-example" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-data-protection-by-label" policy to require "dataprotection" labeling on workloads.
+---
+
+## Policy Definition
+/kasten/kasten-generate-example-backup-policy/kasten-generate-example-backup-policy.yaml
+
+```yaml
+# This is an example rule intended to be cloned & modified to meet organizational requirements.
+# The `dataprotection` label value can be changed to correspond with specific policy templates.
+#
+# NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. An example ClusterRole to provide required privileges is provided within the comments of the policy manifest.
+# +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: ClusterRole +# metadata: +# labels: +# app.kubernetes.io/component: background-controller +# app.kubernetes.io/instance: kyverno +# app.kubernetes.io/part-of: kyverno +# name: kyverno:create-kasten-policies +# rules: +# - apiGroups: +# - config.kio.kasten.io +# resources: +# - policies +# verbs: +# - create +# - update +# - delete +# +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: kasten-generate-example-backup-policy + annotations: + policies.kyverno.io/title: Generate Kasten Backup Policy Based on Resource Label + policies.kyverno.io/category: Veeam Kasten + kyverno.io/kyverno-version: 1.12.1 + policies.kyverno.io/minversion: 1.12.0 + kyverno.io/kubernetes-version: "1.24-1.30" + policies.kyverno.io/subject: Policy + policies.kyverno.io/description: >- + Generates a Kasten policy for a namespace that includes any Deployment or StatefulSet with a "dataprotection=kasten-example" label, if the policy does not already exist. This Kyverno policy can be used in combination with the "kasten-data-protection-by-label" policy to require "dataprotection" labeling on workloads. +spec: + rules: + - name: kasten-generate-example-backup-policy + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + selector: + matchLabels: + dataprotection: kasten-example + context: + - name: dataprotectionLabelValue + variable: + value: "kasten-example" + - name: kyvernoPolicyName + variable: + value: "kasten-generate-example-backup-policy" + - name: existingPolicy + apiCall: + urlPath: "/apis/config.kio.kasten.io/v1alpha1/namespaces/kasten-io/policies" # returns list of Kasten policies from kasten-io namespace + jmesPath: "items[][[@.metadata.labels.\"generate.kyverno.io/policy-name\"=='{{ kyvernoPolicyName }}'] && [@.spec.selector.matchExpressions[].values[?@=='{{ request.namespace }}']]][][][][] | length(@)" # queries if a Kasten policy protecting the namespace generated by this Kyverno policy already exists + preconditions: + any: + - key: "{{ existingPolicy }}" + operator: Equals + value: 0 # Only generate the policy if it does not already exist + generate: + apiVersion: config.kio.kasten.io/v1alpha1 + kind: Policy + name: "{{ request.namespace }}-{{ dataprotectionLabelValue }}-policy" + namespace: kasten-io + data: + metadata: + name: "{{ request.namespace }}-{{ dataprotectionLabelValue }}-policy" + namespace: kasten-io + spec: + comment: "Auto-generated by Kyverno" + frequency: '@daily' + retention: + daily: 7 + weekly: 4 + monthly: 12 + yearly: 7 + actions: + - action: backup + - action: export + exportParameters: + frequency: '@daily' + profile: + name: test + namespace: kasten-io + exportData: + enabled: true + selector: + matchExpressions: + - key: k10.kasten.io/appNamespace + operator: In + values: + - "{{ request.namespace }}" + +``` diff --git a/content/en/policies/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.md b/content/en/policies/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.md index ebf4abc24..403ab34f2 100644 --- a/content/en/policies/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.md +++ b/content/en/policies/kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.md @@ -5,22 +5,35 @@ version: 1.12.0 subject: Policy policyType: "generate" description: > - Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does 
not already exist. - Use with "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. + Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does not already exist. Use with "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. --- ## Policy Definition /kasten/kasten-generate-policy-by-preset-label/kasten-generate-policy-by-preset-label.yaml ```yaml -# This example assumes that Kasten policy presets named -# "gold", "silver", and "bronze" have been pre-created -# and Kasten was deployed into the `kasten-io` namespace. +# This example assumes that Kasten policy presets named "gold", "silver", and "bronze" have been pre-created and Kasten was deployed into the `kasten-io` namespace. +# +# NOTE: Use of this policy will require granting the Kyverno background-controller additional privileges required to generate Kasten resources. An example ClusterRole to provide required privileges is provided within the comments of the policy manifest. +# +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: ClusterRole +# metadata: +# labels: +# app.kubernetes.io/component: background-controller +# app.kubernetes.io/instance: kyverno +# app.kubernetes.io/part-of: kyverno +# name: kyverno:create-kasten-policies +# rules: +# - apiGroups: +# - config.kio.kasten.io +# resources: +# - policies +# verbs: +# - create +# - update +# - delete # -# Additionally, the Kyverno background controller requires -# additional permissions to create Kasten Policy resources. -# Apply the create-kasten-policies-clusterrole.yaml manifest -# first to grant the required permissions. apiVersion: kyverno.io/v1 kind: ClusterPolicy metadata: @@ -34,7 +47,6 @@ metadata: kyverno.io/kubernetes-version: "1.24-1.30" policies.kyverno.io/description: >- Generates a Kasten policy for a new namespace that includes a valid "dataprotection" label, if the policy does not already exist. - Use with "kasten-validate-ns-by-preset-label" policy to require "dataprotection" labeling on new namespaces. spec: rules: diff --git a/content/en/policies/kasten/kasten-minimum-retention/kasten-minimum-retention.md b/content/en/policies/kasten/kasten-minimum-retention/kasten-minimum-retention.md new file mode 100644 index 000000000..238538fc4 --- /dev/null +++ b/content/en/policies/kasten/kasten-minimum-retention/kasten-minimum-retention.md @@ -0,0 +1,97 @@ +--- +title: "Set Kasten Policy Minimum Backup Retention" +category: Veeam Kasten +version: 1.6.2 +subject: Policy +policyType: "mutate" +description: > + Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment (1) of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. 
Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type +--- + +## Policy Definition +/kasten/kasten-minimum-retention/kasten-minimum-retention.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: kasten-minimum-retention + annotations: + policies.kyverno.io/title: Set Kasten Policy Minimum Backup Retention + policies.kyverno.io/category: Veeam Kasten + kyverno.io/kyverno-version: 1.12.1 + policies.kyverno.io/minversion: 1.6.2 + kyverno.io/kubernetes-version: "1.24-1.30" + policies.kyverno.io/subject: Policy + policies.kyverno.io/description: >- + Example Kyverno policy to enforce common compliance retention standards by modifying Kasten Policy backup retention settings. Based on regulation/compliance standard requirements, uncomment (1) of the desired GFS retention schedules to mutate existing and future Kasten Policies. Alternatively, this policy can be used to reduce retention lengths to enforce cost optimization. NOTE: This example only applies to Kasten Policies with an '@hourly' frequency. Refer to Kasten documentation for Policy API specification if modifications are necessary: https://docs.kasten.io/latest/api/policies.html#policy-api-type +spec: + schemaValidation: false + rules: + - name: kasten-minimum-retention + match: + any: + - resources: + kinds: + - config.kio.kasten.io/v1alpha1/Policy + preconditions: + all: + # Match only @hourly policies that do not use policy presets, as the + # number of retained artifacts can only be specified for frequencies + # of the same or lower granularity than the policy frequency. For example, + # if the policy frequency is '@daily', then retention can have values for + # 'daily', 'weekly', 'monthly' and 'yearly', but not for 'hourly'. + # If the policy frequency is 'hourly', then all retention values are + # allowed. If the policy frequency is '@onDemand' or policy preset is used + # then retention values are not allowed. 
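+        # For example (illustrative values): a Kasten Policy whose spec sets
+        # frequency: '@hourly' is mutated by this rule, while one that sets
+        # frequency: '@daily' or omits the field entirely is skipped.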
+ - key: "{{ request.object.spec.frequency || ''}}" + operator: Equals + value: '@hourly' + mutate: + # Federal Information Security Management Act (FISMA): 3 Years + #patchesJson6902: |- + # - path: "/spec/retention" + # op: replace + # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":3} + + # Health Insurance Portability and Accountability Act (HIPAA): 6 Years + #patchesJson6902: |- + # - path: "/spec/retention" + # op: replace + # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":6} + + # National Energy Commission (NERC): 3 to 6 Years + #patchesJson6902: |- + # - path: "/spec/retention" + # op: replace + # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":3} + + # Basel II Capital Accord: 3 to 7 Years + #patchesJson6902: |- + # - path: "/spec/retention" + # op: replace + # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":3} + + # Sarbanes-Oxley Act of 2002 (SOX): 7 Years + #patchesJson6902: |- + # - path: "/spec/retention" + # op: replace + # value: {"hourly":24,"daily":30,"weekly":4,"monthly":12,"yearly":7} + + # National Industrial Security Program Operating Manual (NISPOM): 6 to 12 Months + #patchesJson6902: |- + # - path: "/spec/retention" + # op: replace + # value: {"hourly":24,"daily":30,"weekly":4,"monthly":6} + + # Cost Optimization (Maximum Retention: 3 Months) + patchesJson6902: |- + - path: "/spec/retention" + op: replace + value: + hourly: 24 + daily: 30 + weekly: 4 + monthly: 3 + +``` diff --git a/content/en/policies/kubecost-cel/require-kubecost-labels/require-kubecost-labels.md b/content/en/policies/kubecost-cel/require-kubecost-labels/require-kubecost-labels.md new file mode 100644 index 000000000..dacdba17f --- /dev/null +++ b/content/en/policies/kubecost-cel/require-kubecost-labels/require-kubecost-labels.md @@ -0,0 +1,59 @@ +--- +title: "Require Kubecost Labels in CEL expressions" +category: Kubecost in CEL +version: +subject: Pod, Label +policyType: "validate" +description: > + Kubecost can use labels assigned to Pods in order to track and display cost allocation in a granular way. These labels, which can be customized, can be used to organize and group workloads in different ways. This policy requires that the labels `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), these labels will also be required for all Pod controllers. +--- + +## Policy Definition +/kubecost-cel/require-kubecost-labels/require-kubecost-labels.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-kubecost-labels + annotations: + policies.kyverno.io/title: Require Kubecost Labels in CEL expressions + policies.kyverno.io/category: Kubecost in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod, Label + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Kubecost can use labels assigned to Pods in order to track and display + cost allocation in a granular way. These labels, which can be customized, can be used + to organize and group workloads in different ways. This policy requires that the labels + `owner`, `team`, `department`, `app`, and `env` are all defined on Pods. With Kyverno + autogen enabled (absence of the annotation `pod-policies.kyverno.io/autogen-controllers=none`), + these labels will also be required for all Pod controllers. 
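+# Illustrative only (not part of the policy): a Pod satisfying this rule might
+# carry placeholder label values such as:
+#   metadata:
+#     labels:
+#       owner: alice
+#       team: payments
+#       department: engineering
+#       app: checkout
+#       env: production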
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+  - name: require-labels
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    validate:
+      cel:
+        expressions:
+          - expression: >-
+              has(object.metadata.labels) &&
+              has(object.metadata.labels.owner) && object.metadata.labels.owner != '' &&
+              has(object.metadata.labels.team) && object.metadata.labels.team != '' &&
+              has(object.metadata.labels.department) && object.metadata.labels.department != '' &&
+              has(object.metadata.labels.app) && object.metadata.labels.app != '' &&
+              has(object.metadata.labels.env) && object.metadata.labels.env != ''
+            message: "The Kubecost labels `owner`, `team`, `department`, `app`, and `env` are all required for Pods."
+
+
+```
diff --git a/content/en/policies/kubeops/config-syncer-secret-generation-from-rancher-capi/config-syncer-secret-generation-from-rancher-capi.md b/content/en/policies/kubeops/config-syncer-secret-generation-from-rancher-capi/config-syncer-secret-generation-from-rancher-capi.md
index e203b5245..15e3d342a 100644
--- a/content/en/policies/kubeops/config-syncer-secret-generation-from-rancher-capi/config-syncer-secret-generation-from-rancher-capi.md
+++ b/content/en/policies/kubeops/config-syncer-secret-generation-from-rancher-capi/config-syncer-secret-generation-from-rancher-capi.md
@@ -30,7 +30,7 @@ metadata:
     required by the Kubeops Config Syncer for it to sync ConfigMaps/Secrets from the
     Rancher management cluster to downstream clusters.
 spec:
-  generateExistingOnPolicyUpdate: true
+  generateExisting: true
   rules:
   - name: source-rancher-non-local-cluster-and-capi-secret
     match:
diff --git a/content/en/policies/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.md b/content/en/policies/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.md
new file mode 100644
index 000000000..10ae9d1d5
--- /dev/null
+++ b/content/en/policies/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.md
@@ -0,0 +1,70 @@
+---
+title: "Advanced Restrict Image Registries in CEL expressions"
+category: Other in CEL
+version: 1.11.0
+subject: Pod
+policyType: "validate"
+description: >
+    In instances where a ClusterPolicy that defines all the approved image registries is insufficient, more granular control may be needed to set permitted registries, especially in multi-tenant use cases where some registries may be based on the Namespace. This policy shows an advanced version of the Restrict Image Registries policy which gets a global approved registry from a ConfigMap and, based upon an annotation at the Namespace level, gets the registry approved for that Namespace.
+---
+
+## Policy Definition
+/other-cel/advanced-restrict-image-registries/advanced-restrict-image-registries.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: advanced-restrict-image-registries
+  annotations:
+    policies.kyverno.io/title: Advanced Restrict Image Registries in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/severity: medium
+    kyverno.io/kyverno-version: 1.11.0
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      In instances where a ClusterPolicy that defines all the approved image registries
+      is insufficient, more granular control may be needed to set permitted registries,
+      especially in multi-tenant use cases where some registries may be based on
+      the Namespace. This policy shows an advanced version of the Restrict Image Registries
+      policy which gets a global approved registry from a ConfigMap and, based upon an
+      annotation at the Namespace level, gets the registry approved for that Namespace.
+spec:
+  validationFailureAction: Audit
+  background: false
+  rules:
+  - name: validate-corp-registries
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    validate:
+      cel:
+        paramKind:
+          apiVersion: v1
+          kind: ConfigMap
+        paramRef:
+          name: clusterregistries
+          namespace: default
+          parameterNotFoundAction: Deny
+        variables:
+          - name: allContainers
+            expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))"
+          - name: nsregistries
+            expression: >-
+              (has(namespaceObject.metadata.annotations) && 'corp.com/allowed-registries' in namespaceObject.metadata.annotations) ?
+              namespaceObject.metadata.annotations['corp.com/allowed-registries'] : ' '
+          - name: clusterregistries
+            expression: "'registries' in params.data ? params.data['registries'] : ' '"
+        expressions:
+          - expression: "variables.allContainers.all(container, container.image.startsWith(variables.nsregistries) || container.image.startsWith(variables.clusterregistries))"
+            message: This Pod names an image that is not from an approved registry.
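+        # Illustrative supporting objects (names and values are assumptions, not
+        # part of this policy): the paramRef above expects a ConfigMap such as
+        #   apiVersion: v1
+        #   kind: ConfigMap
+        #   metadata:
+        #     name: clusterregistries
+        #     namespace: default
+        #   data:
+        #     registries: "ghcr.io/myorg/"
+        # and the nsregistries variable reads a Namespace annotation such as
+        #   corp.com/allowed-registries: "registry.corp.com/team-a/"
+        # Images must start with one of these two prefixes to be allowed.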
+ + +``` diff --git a/content/en/policies/other-cel/allowed-annotations/allowed-annotations.md b/content/en/policies/other-cel/allowed-annotations/allowed-annotations.md index 5a266d64c..51c15a6dd 100644 --- a/content/en/policies/other-cel/allowed-annotations/allowed-annotations.md +++ b/content/en/policies/other-cel/allowed-annotations/allowed-annotations.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/allowed-pod-priorities/allowed-pod-priorities.md b/content/en/policies/other-cel/allowed-pod-priorities/allowed-pod-priorities.md index 697d3c9a2..0ce6b5984 100644 --- a/content/en/policies/other-cel/allowed-pod-priorities/allowed-pod-priorities.md +++ b/content/en/policies/other-cel/allowed-pod-priorities/allowed-pod-priorities.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: paramKind: diff --git a/content/en/policies/other-cel/block-ephemeral-containers/block-ephemeral-containers.md b/content/en/policies/other-cel/block-ephemeral-containers/block-ephemeral-containers.md index aeb24b102..1c19fa7d5 100644 --- a/content/en/policies/other-cel/block-ephemeral-containers/block-ephemeral-containers.md +++ b/content/en/policies/other-cel/block-ephemeral-containers/block-ephemeral-containers.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/check-env-vars/check-env-vars.md b/content/en/policies/other-cel/check-env-vars/check-env-vars.md index eea533c57..6e1493d0b 100644 --- a/content/en/policies/other-cel/check-env-vars/check-env-vars.md +++ b/content/en/policies/other-cel/check-env-vars/check-env-vars.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.md b/content/en/policies/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.md index ea40fc216..5cbbe8586 100644 --- a/content/en/policies/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.md +++ b/content/en/policies/other-cel/check-node-for-cve-2022-0185/check-node-for-cve-2022-0185.md @@ -40,6 +40,9 @@ spec: - resources: kinds: - Node + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.md b/content/en/policies/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.md index 22c0756c3..fa2e79b2e 100644 --- a/content/en/policies/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.md +++ b/content/en/policies/other-cel/check-serviceaccount-secrets/check-serviceaccount-secrets.md @@ -40,6 +40,9 @@ spec: - resources: kinds: - ServiceAccount + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.md b/content/en/policies/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.md new file mode 100644 index 000000000..a3b4b169e --- /dev/null +++ b/content/en/policies/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.md @@ -0,0 +1,61 @@ +--- +title: "Deny Commands in Exec Probe in CEL expressions" +category: Other in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Developers may feel 
compelled to use simple shell commands as a workaround to creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged via detection of those commands. This policy prevents the use of certain commands `jcmd`, `ps`, or `ls` if found in a Pod's liveness exec probe. +--- + +## Policy Definition +/other-cel/deny-commands-in-exec-probe/deny-commands-in-exec-probe.yaml + +```yaml +apiVersion: kyverno.io/v2beta1 +kind: ClusterPolicy +metadata: + name: deny-commands-in-exec-probe + annotations: + policies.kyverno.io/title: Deny Commands in Exec Probe in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Developers may feel compelled to use simple shell commands as a workaround to + creating "proper" liveness or readiness probes for a Pod. Such a practice can be discouraged + via detection of those commands. This policy prevents the use of certain commands + `jcmd`, `ps`, or `ls` if found in a Pod's liveness exec probe. +spec: + validationFailureAction: Audit + background: false + rules: + - name: check-commands + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "check-liveness-probes-commands-exist" + expression: >- + object.spec.containers.exists(container, + has(container.livenessProbe) && has(container.livenessProbe.exec) && + size(container.livenessProbe.exec.command) > 0) + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + !has(container.livenessProbe) || !has(container.livenessProbe.exec) || + !container.livenessProbe.exec.command.exists(command, + command.matches('\\bjcmd\\b') || command.matches('\\bps\\b') || command.matches('\\bls\\b'))) + message: Cannot use commands `jcmd`, `ps`, or `ls` in liveness probes. 
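+        # Illustrative only: a liveness probe like the following (hypothetical
+        # Pod snippet) would be reported, since 'ps' matches the expression:
+        #   livenessProbe:
+        #     exec:
+        #       command: ["sh", "-c", "ps aux | grep my-app"]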
+ + +``` diff --git a/content/en/policies/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.md b/content/en/policies/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.md index d8fb36795..71c39ccb7 100644 --- a/content/en/policies/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.md +++ b/content/en/policies/other-cel/deny-secret-service-account-token-type/deny-secret-service-account-token-type.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Secret + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/disallow-all-secrets/disallow-all-secrets.md b/content/en/policies/other-cel/disallow-all-secrets/disallow-all-secrets.md index 28bc0cff4..b68cdb3d3 100644 --- a/content/en/policies/other-cel/disallow-all-secrets/disallow-all-secrets.md +++ b/content/en/policies/other-cel/disallow-all-secrets/disallow-all-secrets.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/content/en/policies/other-cel/disallow-localhost-services/disallow-localhost-services.md b/content/en/policies/other-cel/disallow-localhost-services/disallow-localhost-services.md index 3cf90f461..b5636d5cc 100644 --- a/content/en/policies/other-cel/disallow-localhost-services/disallow-localhost-services.md +++ b/content/en/policies/other-cel/disallow-localhost-services/disallow-localhost-services.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Service + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.md b/content/en/policies/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.md index 650b4fc03..602ce80b7 100644 --- a/content/en/policies/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.md +++ b/content/en/policies/other-cel/disallow-secrets-from-env-vars/disallow-secrets-from-env-vars.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/docker-socket-requires-label/docker-socket-requires-label.md b/content/en/policies/other-cel/docker-socket-requires-label/docker-socket-requires-label.md index 2ea3c4fad..8286bc34e 100644 --- a/content/en/policies/other-cel/docker-socket-requires-label/docker-socket-requires-label.md +++ b/content/en/policies/other-cel/docker-socket-requires-label/docker-socket-requires-label.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/content/en/policies/other-cel/enforce-pod-duration/enforce-pod-duration.md b/content/en/policies/other-cel/enforce-pod-duration/enforce-pod-duration.md index a5735eb33..67748edfa 100644 --- a/content/en/policies/other-cel/enforce-pod-duration/enforce-pod-duration.md +++ b/content/en/policies/other-cel/enforce-pod-duration/enforce-pod-duration.md @@ -36,6 +36,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/content/en/policies/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.md b/content/en/policies/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.md index e2c4658d5..0236f2a0d 100644 --- a/content/en/policies/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.md +++ 
b/content/en/policies/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.md
@@ -39,6 +39,9 @@ spec:
       - resources:
           kinds:
           - PersistentVolumeClaim
+          operations:
+          - CREATE
+          - UPDATE
     validate:
       cel:
         expressions:
diff --git a/content/en/policies/other-cel/ensure-probes-different/ensure-probes-different.md b/content/en/policies/other-cel/ensure-probes-different/ensure-probes-different.md
index e1428f2c6..e753bd70c 100644
--- a/content/en/policies/other-cel/ensure-probes-different/ensure-probes-different.md
+++ b/content/en/policies/other-cel/ensure-probes-different/ensure-probes-different.md
@@ -41,6 +41,9 @@ spec:
           - Deployment
           - DaemonSet
           - StatefulSet
+          operations:
+          - CREATE
+          - UPDATE
     validate:
       cel:
         expressions:
diff --git a/content/en/policies/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.md b/content/en/policies/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.md
index 38919bc30..91bb3b096 100644
--- a/content/en/policies/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.md
+++ b/content/en/policies/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.md
@@ -41,6 +41,9 @@ spec:
       - resources:
           kinds:
           - Pod
+          operations:
+          - CREATE
+          - UPDATE
     validate:
       cel:
         variables:
diff --git a/content/en/policies/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.md b/content/en/policies/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.md
new file mode 100644
index 000000000..da9d7b1d7
--- /dev/null
+++ b/content/en/policies/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.md
@@ -0,0 +1,125 @@
+---
+title: "Exclude Namespaces Dynamically in CEL expressions"
+category: Sample in CEL
+version: 1.11.0
+subject: Namespace, Pod
+policyType: "validate"
+description: >
+    It's common for policy lookups to consider a mapping to many possible values rather than a static mapping. This is a sample which demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap where the ConfigMap stores an array of strings. This policy validates that any Pods created outside of the list of Namespaces have the label `foo` applied.
+---
+
+## Policy Definition
+/other-cel/exclude-namespaces-dynamically/exclude-namespaces-dynamically.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: exclude-namespaces-example
+  annotations:
+    policies.kyverno.io/title: Exclude Namespaces Dynamically in CEL expressions
+    policies.kyverno.io/category: Sample in CEL
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/subject: Namespace, Pod
+    policies.kyverno.io/minversion: 1.11.0
+    pod-policies.kyverno.io/autogen-controllers: none
+    kyverno.io/kyverno-version: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/description: >-
+      It's common for policy lookups to consider a mapping to many possible values rather than a
+      static mapping. This is a sample which demonstrates how to dynamically look up an allow list of Namespaces from a ConfigMap
+      where the ConfigMap stores an array of strings. This policy validates that any Pods created
+      outside of the list of Namespaces have the label `foo` applied.
+spec: + validationFailureAction: Audit + background: true + rules: + - name: exclude-namespaces-dynamically + match: + any: + - resources: + kinds: + - Deployment + - DaemonSet + - StatefulSet + - Job + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "filter-namespaces" + expression: "!(request.namespace in params.data['exclude'].split(', '))" + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: namespace-filters + namespace: default + parameterNotFoundAction: Deny + expressions: + - expression: "has(object.spec.template.metadata) && has(object.spec.template.metadata.labels) && 'foo' in object.spec.template.metadata.labels" + messageExpression: > + 'Creating Pods in the ' + request.namespace + ' namespace,' + + ' which is not in the excluded list of namespaces' + params.data.exclude + ',' + + ' is forbidden unless it carries the label `foo`.' + - name: exclude-namespaces-dynamically-pods + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "filter-namespaces" + expression: "!(request.namespace in params.data['exclude'].split(', '))" + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: namespace-filters + namespace: default + parameterNotFoundAction: Deny + expressions: + - expression: "has(object.metadata.labels) && 'foo' in object.metadata.labels" + messageExpression: > + 'Creating Pods in the ' + request.namespace + ' namespace,' + + ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' + + ' is forbidden unless it carries the label `foo`.' + - name: exclude-namespaces-dynamically-cronjobs + match: + any: + - resources: + kinds: + - CronJob + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "filter-namespaces" + expression: "!(request.namespace in params.data['exclude'].split(', '))" + validate: + cel: + paramKind: + apiVersion: v1 + kind: ConfigMap + paramRef: + name: namespace-filters + namespace: default + parameterNotFoundAction: Deny + expressions: + - expression: >- + has(object.spec.jobTemplate.spec.template.metadata) && + has(object.spec.jobTemplate.spec.template.metadata.labels) && 'foo' in object.spec.jobTemplate.spec.template.metadata.labels + messageExpression: > + 'Creating Pods in the ' + request.namespace + ' namespace,' + + ' which is not in the excluded list of namespaces ' + params.data.exclude + ',' + + ' is forbidden unless it carries the label `foo`.' 
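+        # Illustrative only (assumed values): the paramRef in each rule expects
+        # a ConfigMap like the following. Note the celPreconditions split the
+        # 'exclude' string on ', ' (comma plus space), so entries must be
+        # separated exactly that way.
+        #   apiVersion: v1
+        #   kind: ConfigMap
+        #   metadata:
+        #     name: namespace-filters
+        #     namespace: default
+        #   data:
+        #     exclude: "kube-system, kube-public, platform"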
+ + +``` diff --git a/content/en/policies/other-cel/forbid-cpu-limits/forbid-cpu-limits.md b/content/en/policies/other-cel/forbid-cpu-limits/forbid-cpu-limits.md index b1e8ba2d4..ffebf4a72 100644 --- a/content/en/policies/other-cel/forbid-cpu-limits/forbid-cpu-limits.md +++ b/content/en/policies/other-cel/forbid-cpu-limits/forbid-cpu-limits.md @@ -36,6 +36,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/imagepullpolicy-always/imagepullpolicy-always.md b/content/en/policies/other-cel/imagepullpolicy-always/imagepullpolicy-always.md index 10dcced24..468fc6b3e 100644 --- a/content/en/policies/other-cel/imagepullpolicy-always/imagepullpolicy-always.md +++ b/content/en/policies/other-cel/imagepullpolicy-always/imagepullpolicy-always.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/ingress-host-match-tls/ingress-host-match-tls.md b/content/en/policies/other-cel/ingress-host-match-tls/ingress-host-match-tls.md index 287504c49..3c6d78e8b 100644 --- a/content/en/policies/other-cel/ingress-host-match-tls/ingress-host-match-tls.md +++ b/content/en/policies/other-cel/ingress-host-match-tls/ingress-host-match-tls.md @@ -40,6 +40,9 @@ spec: - resources: kinds: - Ingress + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/content/en/policies/other-cel/limit-containers-per-pod/limit-containers-per-pod.md b/content/en/policies/other-cel/limit-containers-per-pod/limit-containers-per-pod.md index e1307b861..d9fca48b0 100644 --- a/content/en/policies/other-cel/limit-containers-per-pod/limit-containers-per-pod.md +++ b/content/en/policies/other-cel/limit-containers-per-pod/limit-containers-per-pod.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.md b/content/en/policies/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.md index 7d2aab77a..2f3486e05 100644 --- a/content/en/policies/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.md +++ b/content/en/policies/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - PersistentVolume + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/limit-hostpath-vols/limit-hostpath-vols.md b/content/en/policies/other-cel/limit-hostpath-vols/limit-hostpath-vols.md new file mode 100644 index 000000000..ba7f09ac2 --- /dev/null +++ b/content/en/policies/other-cel/limit-hostpath-vols/limit-hostpath-vols.md @@ -0,0 +1,57 @@ +--- +title: "Limit hostPath Volumes to Specific Directories in CEL expressions" +category: Other in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + hostPath volumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. It is strongly recommended to pair this policy with a second to ensure readOnly access is enforced preventing directory escape. 
+--- + +## Policy Definition +/other-cel/limit-hostpath-vols/limit-hostpath-vols.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-hostpath-vols + annotations: + policies.kyverno.io/title: Limit hostPath Volumes to Specific Directories in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + hostPath volumes consume the underlying node's file system. If hostPath volumes + are not to be universally disabled, they should be restricted to only certain + host paths so as not to allow access to sensitive information. This policy ensures + the only directory that can be mounted as a hostPath volume is /data. It is strongly + recommended to pair this policy with a second to ensure readOnly + access is enforced preventing directory escape. +spec: + background: false + validationFailureAction: Audit + rules: + - name: limit-hostpath-to-slash-data + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "has-host-path-volume" + expression: "has(object.spec.volumes) && object.spec.volumes.exists(volume, has(volume.hostPath))" + validate: + cel: + expressions: + - expression: "object.spec.volumes.all(volume, !has(volume.hostPath) || volume.hostPath.path.split('/')[1] == 'data')" + message: hostPath volumes are confined to /data. + + +``` diff --git a/content/en/policies/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.md b/content/en/policies/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.md index eeebc5a53..68ee34b36 100644 --- a/content/en/policies/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.md +++ b/content/en/policies/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: variables: diff --git a/content/en/policies/other-cel/metadata-match-regex/metadata-match-regex.md b/content/en/policies/other-cel/metadata-match-regex/metadata-match-regex.md index 7e2445396..ecf9999c3 100644 --- a/content/en/policies/other-cel/metadata-match-regex/metadata-match-regex.md +++ b/content/en/policies/other-cel/metadata-match-regex/metadata-match-regex.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/pdb-maxunavailable/pdb-maxunavailable.md b/content/en/policies/other-cel/pdb-maxunavailable/pdb-maxunavailable.md index 590db2ee7..3aa9657c5 100644 --- a/content/en/policies/other-cel/pdb-maxunavailable/pdb-maxunavailable.md +++ b/content/en/policies/other-cel/pdb-maxunavailable/pdb-maxunavailable.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - PodDisruptionBudget + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/prevent-bare-pods/prevent-bare-pods.md b/content/en/policies/other-cel/prevent-bare-pods/prevent-bare-pods.md index 53d7d33b5..966211f15 100644 --- a/content/en/policies/other-cel/prevent-bare-pods/prevent-bare-pods.md +++ b/content/en/policies/other-cel/prevent-bare-pods/prevent-bare-pods.md @@ -40,6 +40,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git 
a/content/en/policies/other-cel/prevent-cr8escape/prevent-cr8escape.md b/content/en/policies/other-cel/prevent-cr8escape/prevent-cr8escape.md index f6aeece9c..b9279c18f 100644 --- a/content/en/policies/other-cel/prevent-cr8escape/prevent-cr8escape.md +++ b/content/en/policies/other-cel/prevent-cr8escape/prevent-cr8escape.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/other-cel/require-annotations/require-annotations.md b/content/en/policies/other-cel/require-annotations/require-annotations.md new file mode 100644 index 000000000..041426973 --- /dev/null +++ b/content/en/policies/other-cel/require-annotations/require-annotations.md @@ -0,0 +1,53 @@ +--- +title: "Require Annotations in CEL expressions" +category: Other in CEL +version: +subject: Pod, Annotation +policyType: "validate" +description: > + Define and use annotations that identify semantic attributes of your application or Deployment. A common set of annotations allows tools to work collaboratively, describing objects in a common manner that all tools can understand. The recommended annotations describe applications in a way that can be queried. This policy validates that the annotation `corp.org/department` is specified with some value. +--- + +## Policy Definition +/other-cel/require-annotations/require-annotations.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-annotations + annotations: + policies.kyverno.io/title: Require Annotations in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod, Annotation + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Define and use annotations that identify semantic attributes of your application or Deployment. + A common set of annotations allows tools to work collaboratively, describing objects in a common manner that + all tools can understand. The recommended annotations describe applications in a way that can be + queried. This policy validates that the annotation `corp.org/department` is specified with some value. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-for-annotation + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.metadata.annotations) && + 'corp.org/department' in object.metadata.annotations && object.metadata.annotations['corp.org/department'] != '' + message: "The annotation `corp.org/department` is required." + + +``` diff --git a/content/en/policies/other-cel/require-container-port-names/require-container-port-names.md b/content/en/policies/other-cel/require-container-port-names/require-container-port-names.md new file mode 100644 index 000000000..4e7024053 --- /dev/null +++ b/content/en/policies/other-cel/require-container-port-names/require-container-port-names.md @@ -0,0 +1,52 @@ +--- +title: "Require Container Port Names in CEL expressions" +category: Other in CEL +version: +subject: Pod +policyType: "validate" +description: > + Containers may define ports on which they listen. In addition to a port number, a name field may optionally be used. Including a name makes it easier when defining Service resource definitions and others since the name may be referenced allowing the port number to change. 
This policy requires that for every containerPort defined there is also a name specified. +--- + +## Policy Definition +/other-cel/require-container-port-names/require-container-port-names.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-container-port-names + annotations: + policies.kyverno.io/title: Require Container Port Names in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Containers may define ports on which they listen. In addition to a port number, + a name field may optionally be used. Including a name makes it easier when defining + Service resource definitions and others since the name may be referenced allowing + the port number to change. This policy requires that for every containerPort defined + there is also a name specified. +spec: + validationFailureAction: Audit + background: true + rules: + - name: port-name + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.containers.all(container, !has(container.ports) || container.ports.all(port, has(port.name)))" + message: Name is required for every containerPort. + + +``` diff --git a/content/en/policies/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.md b/content/en/policies/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.md new file mode 100644 index 000000000..b320838b5 --- /dev/null +++ b/content/en/policies/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.md @@ -0,0 +1,50 @@ +--- +title: "Require Multiple Replicas in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Deployment +policyType: "validate" +description: > + Deployments with a single replica cannot be highly available and thus the application may suffer downtime if that one replica goes down. This policy validates that Deployments have more than one replica. +--- + +## Policy Definition +/other-cel/require-deployments-have-multiple-replicas/require-deployments-have-multiple-replicas.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: deployment-has-multiple-replicas + annotations: + policies.kyverno.io/title: Require Multiple Replicas in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Deployment + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Deployments with a single replica cannot be highly available and thus the application + may suffer downtime if that one replica goes down. This policy validates that Deployments + have more than one replica. +spec: + validationFailureAction: Audit + background: true + rules: + - name: deployment-has-multiple-replicas + match: + any: + - resources: + kinds: + - Deployment + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.replicas > 1" + message: "Deployments should have more than one replica to ensure availability." 
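+        # Illustrative only: a Deployment with 'spec: {replicas: 2}' (or more)
+        # satisfies this expression; 'replicas: 1' is reported.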
+
+
+```
diff --git a/content/en/policies/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.md b/content/en/policies/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.md
new file mode 100644
index 000000000..26f103119
--- /dev/null
+++ b/content/en/policies/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.md
@@ -0,0 +1,67 @@
+---
+title: "Require Requests and Limits for emptyDir in CEL expressions"
+category: Other in CEL
+version: 
+subject: Pod
+policyType: "validate"
+description: >
+    Pods which mount emptyDir volumes may be allowed to potentially overrun the medium backing the emptyDir volume. This sample ensures that any initContainers or containers mounting an emptyDir volume have ephemeral-storage requests and limits set. The policy is skipped if the volume already has a sizeLimit set.
+---
+
+## Policy Definition
+/other-cel/require-emptydir-requests-limits/require-emptydir-requests-limits.yaml
+
+```yaml
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: require-emptydir-requests-and-limits
+  annotations:
+    policies.kyverno.io/title: Require Requests and Limits for emptyDir in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/severity: medium
+    kyverno.io/kyverno-version: 1.12.1
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Pods which mount emptyDir volumes may be allowed to potentially overrun
+      the medium backing the emptyDir volume. This sample ensures that any
+      initContainers or containers mounting an emptyDir volume have
+      ephemeral-storage requests and limits set. The policy is skipped if
+      the volume already has a sizeLimit set.
+spec:
+  background: false
+  validationFailureAction: Audit
+  rules:
+  - name: check-emptydir-requests-limits
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+          operations:
+          - CREATE
+          - UPDATE
+    celPreconditions:
+      - name: "has-emptydir-volume"
+        expression: "has(object.spec.volumes) && object.spec.volumes.exists(volume, has(volume.emptyDir))"
+    validate:
+      cel:
+        variables:
+          - name: containers
+            expression: "object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : [])"
+          - name: emptydirnames
+            expression: >-
+              has(object.spec.volumes) ?
+              object.spec.volumes.filter(volume, has(volume.emptyDir) && !has(volume.emptyDir.sizeLimit)).map(volume, volume.name) : []
+        expressions:
+          - expression: >-
+              variables.containers.all(container,
+              !has(container.volumeMounts) ||
+              !container.volumeMounts.exists(mount, mount.name in variables.emptydirnames) ||
+              container.resources.?requests[?'ephemeral-storage'].hasValue() &&
+              container.resources.?limits[?'ephemeral-storage'].hasValue())
+            message: Containers mounting emptyDir volumes must specify requests and limits for ephemeral-storage.
+
+
+```
diff --git a/content/en/policies/other-cel/require-image-checksum/require-image-checksum.md b/content/en/policies/other-cel/require-image-checksum/require-image-checksum.md
new file mode 100644
index 000000000..1a76a03bd
--- /dev/null
+++ b/content/en/policies/other-cel/require-image-checksum/require-image-checksum.md
@@ -0,0 +1,50 @@
+---
+title: "Require Images Use Checksums in CEL expressions"
+category: Sample in CEL
+version: 1.11.0
+subject: Pod
+policyType: "validate"
+description: >
+    Use of a SHA checksum when pulling an image is often preferable because tags are mutable and can be overwritten.
This policy checks to ensure that all images use SHA checksums rather than tags. +--- + +## Policy Definition +/other-cel/require-image-checksum/require-image-checksum.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-image-checksum + annotations: + policies.kyverno.io/title: Require Images Use Checksums in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Use of a SHA checksum when pulling an image is often preferable because tags + are mutable and can be overwritten. This policy checks to ensure that all images + use SHA checksums rather than tags. +spec: + validationFailureAction: Audit + background: true + rules: + - name: require-image-checksum + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.containers.all(container, container.image.contains('@'))" + message: "Images must use checksums rather than tags." + + +``` diff --git a/content/en/policies/other-cel/require-ingress-https/require-ingress-https.md b/content/en/policies/other-cel/require-ingress-https/require-ingress-https.md new file mode 100644 index 000000000..ebf741031 --- /dev/null +++ b/content/en/policies/other-cel/require-ingress-https/require-ingress-https.md @@ -0,0 +1,68 @@ +--- +title: "Require Ingress HTTPS in CEL expressions" +category: Other in CEL +version: +subject: Ingress +policyType: "validate" +description: > + Ingress resources should only allow secure traffic by disabling HTTP and therefore only allowing HTTPS. This policy requires that all Ingress resources set the annotation `kubernetes.io/ingress.allow-http` to `"false"` and specify TLS in the spec. +--- + +## Policy Definition +/other-cel/require-ingress-https/require-ingress-https.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-ingress-https + annotations: + policies.kyverno.io/title: Require Ingress HTTPS in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Ingress + policies.kyverno.io/description: >- + Ingress resources should only allow secure traffic by disabling + HTTP and therefore only allowing HTTPS. This policy requires that all + Ingress resources set the annotation `kubernetes.io/ingress.allow-http` to + `"false"` and specify TLS in the spec. +spec: + background: true + validationFailureAction: Audit + rules: + - name: has-annotation + match: + any: + - resources: + kinds: + - Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.metadata.annotations) && + 'kubernetes.io/ingress.allow-http' in object.metadata.annotations && + object.metadata.annotations['kubernetes.io/ingress.allow-http'] == 'false' + message: "The kubernetes.io/ingress.allow-http annotation must be set to false." + - name: has-tls + match: + any: + - resources: + kinds: + - Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.tls)" + message: "TLS must be defined." 
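+# Illustrative sketch only, not part of the policy: an Ingress (hypothetical
+# names) that satisfies both rules above, since it sets the allow-http
+# annotation to "false" and defines spec.tls.
+#
+# apiVersion: networking.k8s.io/v1
+# kind: Ingress
+# metadata:
+#   name: example-ingress
+#   annotations:
+#     kubernetes.io/ingress.allow-http: "false"
+# spec:
+#   tls:
+#   - hosts:
+#     - example.com
+#     secretName: example-tls       # hypothetical TLS Secret
+#   rules:
+#   - host: example.com
+#     http:
+#       paths:
+#       - path: /
+#         pathType: Prefix
+#         backend:
+#           service:
+#             name: example-svc
+#             port:
+#               number: 443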
+ + +``` diff --git a/content/en/policies/other-cel/require-non-root-groups/require-non-root-groups.md b/content/en/policies/other-cel/require-non-root-groups/require-non-root-groups.md new file mode 100644 index 000000000..985b44457 --- /dev/null +++ b/content/en/policies/other-cel/require-non-root-groups/require-non-root-groups.md @@ -0,0 +1,104 @@ +--- +title: "Require Non-Root Groups in CEL expressions" +category: Sample, EKS Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Containers should be forbidden from running with a root primary or supplementary GID. This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number greater than zero (i.e., non root). A known issue prevents a policy such as this using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. +--- + +## Policy Definition +/other-cel/require-non-root-groups/require-non-root-groups.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-non-root-groups + annotations: + policies.kyverno.io/title: Require Non-Root Groups in CEL expressions + policies.kyverno.io/category: Sample, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Containers should be forbidden from running with a root primary or supplementary GID. + This policy ensures the `runAsGroup`, `supplementalGroups`, and `fsGroup` fields are set to a number + greater than zero (i.e., non root). A known issue prevents a policy such as this + using `anyPattern` from being persisted properly in Kubernetes 1.23.0-1.23.2. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-runasgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + ( + has(object.spec.securityContext) && has(object.spec.securityContext.runAsGroup) && (object.spec.securityContext.runAsGroup > 0) && + variables.allContainers.all(container, !has(container.securityContext) || !has(container.securityContext.runAsGroup) || container.securityContext.runAsGroup > 0) + ) || + ( + variables.allContainers.all(container, has(container.securityContext) && has(container.securityContext.runAsGroup) && container.securityContext.runAsGroup > 0) + ) + message: >- + Running with root group IDs is disallowed. The fields + spec.securityContext.runAsGroup, spec.containers[*].securityContext.runAsGroup, + spec.initContainers[*].securityContext.runAsGroup, and + spec.ephemeralContainers[*].securityContext.runAsGroup must be + set to a value greater than zero. + - name: check-supplementalgroups + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.supplementalGroups) || + object.spec.securityContext.supplementalGroups.all(group, group > 0) + message: >- + Containers cannot run with a root primary or supplementary GID. 
The field + spec.securityContext.supplementalGroups must be unset or + set to a value greater than zero. + - name: check-fsgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.fsGroup) || + object.spec.securityContext.fsGroup > 0 + message: >- + Containers cannot run with a root primary or supplementary GID. The field + spec.securityContext.fsGroup must be unset or set to a value greater than zero. + + +``` diff --git a/content/en/policies/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.md b/content/en/policies/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.md new file mode 100644 index 000000000..4c583ba7d --- /dev/null +++ b/content/en/policies/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.md @@ -0,0 +1,53 @@ +--- +title: "Require Pod priorityClassName in CEL expressions" +category: Multi-Tenancy, EKS Best Practices in CEL +version: +subject: Pod +policyType: "validate" +description: > + A Pod may optionally specify a priorityClassName which indicates the scheduling priority relative to others. This requires creation of a PriorityClass object in advance. With this created, a Pod may set this field to that value. In a multi-tenant environment, it is often desired to require this priorityClassName be set to make certain tenant scheduling guarantees. This policy requires that a Pod defines the priorityClassName field with some value. +--- + +## Policy Definition +/other-cel/require-pod-priorityclassname/require-pod-priorityclassname.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-pod-priorityclassname + annotations: + policies.kyverno.io/title: Require Pod priorityClassName in CEL expressions + policies.kyverno.io/category: Multi-Tenancy, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + A Pod may optionally specify a priorityClassName which indicates the scheduling + priority relative to others. This requires creation of a PriorityClass object in advance. + With this created, a Pod may set this field to that value. In a multi-tenant environment, + it is often desired to require this priorityClassName be set to make certain tenant + scheduling guarantees. This policy requires that a Pod defines the priorityClassName field + with some value. +spec: + validationFailureAction: Audit + background: true + rules: + - name: check-priorityclassname + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.priorityClassName) && object.spec.priorityClassName != ''" + message: "Pods must define the priorityClassName field." 
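+# Illustrative sketch only, not part of the policy: a Pod (hypothetical names)
+# that passes the check. The referenced PriorityClass must already exist in
+# the cluster.
+#
+# apiVersion: v1
+# kind: Pod
+# metadata:
+#   name: example-pod
+# spec:
+#   priorityClassName: tenant-standard   # hypothetical PriorityClass
+#   containers:
+#   - name: app
+#     image: nginx:1.27                  # hypothetical image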
+ + +``` diff --git a/content/en/policies/other-cel/require-qos-burstable/require-qos-burstable.md b/content/en/policies/other-cel/require-qos-burstable/require-qos-burstable.md new file mode 100644 index 000000000..1d9e012c2 --- /dev/null +++ b/content/en/policies/other-cel/require-qos-burstable/require-qos-burstable.md @@ -0,0 +1,57 @@ +--- +title: "Require QoS Burstable in CEL expressions" +category: Other, Multi-Tenancy in CEL +version: +subject: Pod +policyType: "validate" +description: > + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain priority guarantees based upon the resources they define. When a Pod has at least one container which defines either requests or limits for either memory or CPU, Kubernetes grants the QoS class as burstable if it does not otherwise qualify for a QoS class of guaranteed. This policy requires that a Pod meet the criteria to qualify for a QoS class of burstable. This policy is provided with the intention that users will need to control its scope by using exclusions, preconditions, and other policy language mechanisms. +--- + +## Policy Definition +/other-cel/require-qos-burstable/require-qos-burstable.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-qos-burstable + annotations: + policies.kyverno.io/title: Require QoS Burstable in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain + priority guarantees based upon the resources they define. When a Pod has at least + one container which defines either requests or limits for either memory or CPU, + Kubernetes grants the QoS class as burstable if it does not otherwise qualify for a QoS class of guaranteed. + This policy requires that a Pod meet the criteria to qualify for a QoS class of burstable. + This policy is provided with the intention that users will need to control its scope by using + exclusions, preconditions, and other policy language mechanisms. +spec: + validationFailureAction: Audit + background: true + rules: + - name: burstable + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.exists(container, + has(container.resources) && + (has(container.resources.requests) || has(container.resources.limits))) + message: "At least one container in the Pod must define either requests or limits for either CPU or memory." + + +``` diff --git a/content/en/policies/other-cel/require-qos-guaranteed/require-qos-guaranteed.md b/content/en/policies/other-cel/require-qos-guaranteed/require-qos-guaranteed.md new file mode 100644 index 000000000..b2c5fa169 --- /dev/null +++ b/content/en/policies/other-cel/require-qos-guaranteed/require-qos-guaranteed.md @@ -0,0 +1,63 @@ +--- +title: "Require QoS Guaranteed in CEL expressions" +category: Other, Multi-Tenancy in CEL +version: +subject: Pod +policyType: "validate" +description: > + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain priority guarantees based upon the resources they define.
When Pods define both requests and limits for both memory and CPU, and the requests and limits are equal to each other, Kubernetes grants the QoS class as guaranteed which allows them to run at a higher priority than others. This policy requires that all containers within a Pod run with this definition resulting in a guaranteed QoS. This policy is provided with the intention that users will need to control its scope by using exclusions, preconditions, and other policy language mechanisms. +--- + +## Policy Definition +/other-cel/require-qos-guaranteed/require-qos-guaranteed.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-qos-guaranteed + annotations: + policies.kyverno.io/title: Require QoS Guaranteed in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Pod Quality of Service (QoS) is a mechanism to ensure Pods receive certain + priority guarantees based upon the resources they define. When Pods define both + requests and limits for both memory and CPU, and the requests and limits are equal + to each other, Kubernetes grants the QoS class as guaranteed which allows them to run + at a higher priority than others. This policy requires that all containers within a Pod + run with this definition resulting in a guaranteed QoS. This policy is provided with the + intention that users will need to control its scope by using + exclusions, preconditions, and other policy language mechanisms. +spec: + validationFailureAction: Audit + background: true + rules: + - name: guaranteed + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + has(container.resources) && + has(container.resources.requests) && + has(container.resources.requests.cpu) && has(container.resources.requests.memory) && + has(container.resources.limits) && + has(container.resources.limits.cpu) && has(container.resources.limits.memory) && + container.resources.requests.cpu == container.resources.limits.cpu && + container.resources.requests.memory == container.resources.limits.memory) + message: "All containers must define memory and CPU requests and limits where they are equal." + + +``` diff --git a/content/en/policies/other-cel/require-storageclass/require-storageclass.md b/content/en/policies/other-cel/require-storageclass/require-storageclass.md new file mode 100644 index 000000000..af4c8c4af --- /dev/null +++ b/content/en/policies/other-cel/require-storageclass/require-storageclass.md @@ -0,0 +1,69 @@ +--- +title: "Require StorageClass in CEL expressions" +category: Other, Multi-Tenancy in CEL +version: +subject: PersistentVolumeClaim, StatefulSet +policyType: "validate" +description: > + PersistentVolumeClaims (PVCs) and StatefulSets may optionally define a StorageClass to dynamically provision storage. In a multi-tenancy environment where StorageClasses are far more common, it is often better to require storage only be provisioned from these StorageClasses. This policy requires that PVCs and StatefulSets containing volumeClaimTemplates define the storageClassName field with some value. 
+--- + +## Policy Definition +/other-cel/require-storageclass/require-storageclass.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: require-storageclass + annotations: + policies.kyverno.io/title: Require StorageClass in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: PersistentVolumeClaim, StatefulSet + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + PersistentVolumeClaims (PVCs) and StatefulSets may optionally define a StorageClass + to dynamically provision storage. In a multi-tenancy environment where StorageClasses are + far more common, it is often better to require storage only be provisioned from these + StorageClasses. This policy requires that PVCs and StatefulSets containing + volumeClaimTemplates define the storageClassName field with some value. +spec: + validationFailureAction: Audit + background: true + rules: + - name: pvc-storageclass + match: + any: + - resources: + kinds: + - PersistentVolumeClaim + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.storageClassName) && object.spec.storageClassName != ''" + message: "PersistentVolumeClaims must define a storageClassName." + - name: ss-storageclass + match: + any: + - resources: + kinds: + - StatefulSet + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.volumeClaimTemplates) || + object.spec.volumeClaimTemplates.all(volumeClaimTemplate, + has(volumeClaimTemplate.spec.storageClassName) && volumeClaimTemplate.spec.storageClassName != '') + message: "StatefulSets must define a storageClassName." + + +``` diff --git a/content/en/policies/other-cel/restrict-annotations/restrict-annotations.md b/content/en/policies/other-cel/restrict-annotations/restrict-annotations.md new file mode 100644 index 000000000..85a173a32 --- /dev/null +++ b/content/en/policies/other-cel/restrict-annotations/restrict-annotations.md @@ -0,0 +1,56 @@ +--- +title: "Restrict Annotations in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Pod, Annotation +policyType: "validate" +description: > + Some annotations control functionality driven by other cluster-wide tools and are not normally set by some class of users. This policy prevents the use of an annotation beginning with `fluxcd.io/`. This can be useful to ensure users either don't set reserved annotations or to force them to use a newer version of an annotation. +--- + +## Policy Definition +/other-cel/restrict-annotations/restrict-annotations.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-annotations + annotations: + policies.kyverno.io/title: Restrict Annotations in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/minversion: 1.11.0 + policies.kyverno.io/subject: Pod, Annotation + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Some annotations control functionality driven by other cluster-wide tools and are not + normally set by some class of users. This policy prevents the use of an annotation beginning + with `fluxcd.io/`. This can be useful to ensure users either + don't set reserved annotations or to force them to use a newer version of an annotation. 
+ pod-policies.kyverno.io/autogen-controllers: none +spec: + validationFailureAction: Audit + background: true + rules: + - name: block-flux-v1 + match: + any: + - resources: + kinds: + - Deployment + - CronJob + - Job + - StatefulSet + - DaemonSet + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.metadata.annotations) || !object.metadata.annotations.exists(annotation, annotation.startsWith('fluxcd.io/'))" + message: Cannot use Flux v1 annotation. + + +``` diff --git a/content/en/policies/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.md b/content/en/policies/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.md new file mode 100644 index 000000000..924d7b9f1 --- /dev/null +++ b/content/en/policies/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.md @@ -0,0 +1,53 @@ +--- +title: "Restrict Binding to Cluster-Admin in CEL expressions" +category: Security in CEL +version: 1.11.0 +subject: RoleBinding, ClusterRoleBinding, RBAC +policyType: "validate" +description: > + The cluster-admin ClusterRole allows any action to be performed on any resource in the cluster and its granting should be heavily restricted. This policy prevents binding to the cluster-admin ClusterRole in RoleBinding or ClusterRoleBinding resources. +--- + +## Policy Definition +/other-cel/restrict-binding-clusteradmin/restrict-binding-clusteradmin.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-binding-clusteradmin + annotations: + policies.kyverno.io/title: Restrict Binding to Cluster-Admin in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: RoleBinding, ClusterRoleBinding, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + The cluster-admin ClusterRole allows any action to be performed on any resource + in the cluster and its granting should be heavily restricted. This + policy prevents binding to the cluster-admin ClusterRole in + RoleBinding or ClusterRoleBinding resources. +spec: + validationFailureAction: Audit + background: true + rules: + - name: clusteradmin-bindings + match: + any: + - resources: + kinds: + - RoleBinding + - ClusterRoleBinding + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.roleRef.name != 'cluster-admin'" + message: "Binding to cluster-admin is not allowed." + + +``` diff --git a/content/en/policies/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.md b/content/en/policies/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.md new file mode 100644 index 000000000..d64223add --- /dev/null +++ b/content/en/policies/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.md @@ -0,0 +1,57 @@ +--- +title: "Restrict Binding System Groups in CEL expressions" +category: Security, EKS Best Practices in CEL +version: 1.11.0 +subject: RoleBinding, ClusterRoleBinding, RBAC +policyType: "validate" +description: > + Certain system groups exist in Kubernetes which grant permissions that are used for certain system-level functions yet typically never appropriate for other users. This policy prevents creating bindings to some of these groups including system:anonymous, system:unauthenticated, and system:masters. 
+--- + +## Policy Definition +/other-cel/restrict-binding-system-groups/restrict-binding-system-groups.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-binding-system-groups + annotations: + policies.kyverno.io/title: Restrict Binding System Groups in CEL expressions + policies.kyverno.io/category: Security, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: RoleBinding, ClusterRoleBinding, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Certain system groups exist in Kubernetes which grant permissions that + are used for certain system-level functions yet typically never appropriate + for other users. This policy prevents creating bindings to some of these + groups including system:anonymous, system:unauthenticated, and system:masters. +spec: + validationFailureAction: Audit + background: true + rules: + - name: restrict-subject-groups + match: + any: + - resources: + kinds: + - RoleBinding + - ClusterRoleBinding + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.subjects.all(subject, subject.name != 'system:anonymous')" + message: "Binding to system:anonymous is not allowed." + - expression: "object.subjects.all(subject, subject.name != 'system:unauthenticated')" + message: "Binding to system:unauthenticated is not allowed." + - expression: "object.subjects.all(subject, subject.name != 'system:masters')" + message: "Binding to system:masters is not allowed." + + +``` diff --git a/content/en/policies/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.md b/content/en/policies/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.md new file mode 100644 index 000000000..c41a20cf3 --- /dev/null +++ b/content/en/policies/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.md @@ -0,0 +1,58 @@ +--- +title: "Restrict ClusterRole with Nodes Proxy in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: ClusterRole, RBAC +policyType: "validate" +description: > + A ClusterRole with nodes/proxy resource access allows a user to perform anything the kubelet API allows. It also allows users to bypass the API server and talk directly to the kubelet potentially circumventing audits and admission controllers. See https://blog.aquasec.com/privilege-escalation-kubernetes-rbac for more info. This policy prevents the creation of a ClusterRole if it contains the nodes/proxy resource. +--- + +## Policy Definition +/other-cel/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-clusterrole-nodesproxy + annotations: + policies.kyverno.io/title: Restrict ClusterRole with Nodes Proxy in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: ClusterRole, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + A ClusterRole with nodes/proxy resource access allows a user to + perform anything the kubelet API allows. It also allows users to bypass + the API server and talk directly to the kubelet potentially circumventing + audits and admission controllers. 
See https://blog.aquasec.com/privilege-escalation-kubernetes-rbac + for more info. This policy prevents the creation + of a ClusterRole if it contains the nodes/proxy resource. +spec: + validationFailureAction: Audit + background: true + rules: + - name: clusterrole-nodesproxy + match: + any: + - resources: + kinds: + - ClusterRole + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + object.rules == null || + !object.rules.exists(rule, + rule.resources.exists(resource, resource == 'nodes/proxy') && + rule.apiGroups.exists(apiGroup, apiGroup == '')) + message: "A ClusterRole containing the nodes/proxy resource is not allowed." + + +``` diff --git a/content/en/policies/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.md b/content/en/policies/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.md new file mode 100644 index 000000000..8ed9beb1d --- /dev/null +++ b/content/en/policies/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.md @@ -0,0 +1,53 @@ +--- +title: "Restrict control plane scheduling in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + Scheduling non-system Pods to control plane nodes (which run kubelet) is often undesirable because it takes away resources from the control plane components and can represent a possible security threat vector. This policy prevents users from setting a toleration in a Pod spec which allows running on control plane nodes with the taint key `node-role.kubernetes.io/master`. +--- + +## Policy Definition +/other-cel/restrict-controlplane-scheduling/restrict-controlplane-scheduling.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-controlplane-scheduling + annotations: + policies.kyverno.io/title: Restrict control plane scheduling in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Scheduling non-system Pods to control plane nodes (which run kubelet) is often undesirable + because it takes away resources from the control plane components and can represent + a possible security threat vector. This policy prevents users from setting a toleration + in a Pod spec which allows running on control plane nodes + with the taint key `node-role.kubernetes.io/master`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: restrict-controlplane-scheduling-master + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.tolerations) || + !object.spec.tolerations.exists(toleration, has(toleration.key) && toleration.key in ['node-role.kubernetes.io/master', 'node-role.kubernetes.io/control-plane']) + message: Pods may not use tolerations which schedule on control plane nodes. 
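+# Illustrative sketch only, not part of the policy: a Pod (hypothetical names)
+# that this rule would flag, because it tolerates a control plane taint key
+# matched by the expression above.
+#
+# apiVersion: v1
+# kind: Pod
+# metadata:
+#   name: example-pod
+# spec:
+#   tolerations:
+#   - key: node-role.kubernetes.io/control-plane   # in the blocked key list
+#     operator: Exists
+#     effect: NoSchedule
+#   containers:
+#   - name: app
+#     image: nginx:1.27                            # hypothetical image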
+ + +``` diff --git a/content/en/policies/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.md b/content/en/policies/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.md new file mode 100644 index 000000000..5d19e0929 --- /dev/null +++ b/content/en/policies/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.md @@ -0,0 +1,55 @@ +--- +title: "Restrict Deprecated Registry in CEL expressions" +category: Best Practices, EKS Best Practices in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + The legacy k8s.gcr.io container image registry is frozen as of the 3rd of April 2023. Images for Kubernetes 1.27 will not be available in the k8s.gcr.io image registry. Please read the announcement for more details: https://kubernetes.io/blog/2023/02/06/k8s-gcr-io-freeze-announcement/ +--- + +## Policy Definition +/other-cel/restrict-deprecated-registry/restrict-deprecated-registry.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-deprecated-registry + annotations: + policies.kyverno.io/title: Restrict Deprecated Registry in CEL expressions + policies.kyverno.io/category: Best Practices, EKS Best Practices in CEL + policies.kyverno.io/severity: high + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.27-1.28" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + The legacy k8s.gcr.io container image registry is frozen as of the 3rd of April 2023. + Images for Kubernetes 1.27 will not be available in the k8s.gcr.io image registry. + Please read the announcement for more details: + https://kubernetes.io/blog/2023/02/06/k8s-gcr-io-freeze-announcement/ +spec: + validationFailureAction: Enforce + background: true + rules: + - name: restrict-deprecated-registry + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: "variables.allContainers.all(container, !container.image.startsWith('k8s.gcr.io/'))" + message: "The \"k8s.gcr.io\" image registry is deprecated. \"registry.k8s.io\" should now be used." + + +``` diff --git a/content/en/policies/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.md b/content/en/policies/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.md new file mode 100644 index 000000000..cd1cc29fd --- /dev/null +++ b/content/en/policies/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.md @@ -0,0 +1,55 @@ +--- +title: "Restrict Edit for Endpoints CVE-2021-25740 in CEL expressions" +category: Security in CEL +version: +subject: ClusterRole +policyType: "validate" +description: > + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue defined in CVE-2021-25740 which could enable users to send network traffic to locations they would otherwise not have access to via a confused deputy attack. This was due to the system:aggregate-to-edit ClusterRole having edit permission of Endpoints.
This policy, intended to run in background mode, checks if your cluster is vulnerable to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have the edit permission of Endpoints. +--- + +## Policy Definition +/other-cel/restrict-edit-for-endpoints/restrict-edit-for-endpoints.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-edit-for-endpoints + annotations: + policies.kyverno.io/title: Restrict Edit for Endpoints CVE-2021-25740 in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: low + policies.kyverno.io/subject: ClusterRole + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Clusters not initially installed with Kubernetes 1.22 may be vulnerable to an issue + defined in CVE-2021-25740 which could enable users to send network traffic to locations + they would otherwise not have access to via a confused deputy attack. This was due to + the system:aggregate-to-edit ClusterRole having edit permission of Endpoints. + This policy, intended to run in background mode, checks if your cluster is vulnerable + to CVE-2021-25740 by ensuring the system:aggregate-to-edit ClusterRole does not have + the edit permission of Endpoints. +spec: + validationFailureAction: Audit + background: true + rules: + - name: system-aggregate-to-edit-check + match: + any: + - resources: + kinds: + - ClusterRole + names: + - system:aggregate-to-edit + validate: + cel: + expressions: + - expression: "!object.rules.exists(rule, 'endpoints' in rule.resources && 'edit' in rule.verbs)" + message: >- + This cluster may still be vulnerable to CVE-2021-25740. The system:aggregate-to-edit ClusterRole + should not have edit permission over Endpoints. + + +``` diff --git a/content/en/policies/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.md b/content/en/policies/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.md new file mode 100644 index 000000000..c01b11f8b --- /dev/null +++ b/content/en/policies/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.md @@ -0,0 +1,64 @@ +--- +title: "Restrict Escalation Verbs in Roles in CEL expressions" +category: Security in CEL +version: 1.11.0 +subject: Role, ClusterRole, RBAC +policyType: "validate" +description: > + The verbs `impersonate`, `bind`, and `escalate` may all potentially lead to privilege escalation and should be tightly controlled. This policy prevents use of these verbs in Role or ClusterRole resources. +--- + +## Policy Definition +/other-cel/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-escalation-verbs-roles + annotations: + policies.kyverno.io/title: Restrict Escalation Verbs in Roles in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Role, ClusterRole, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + The verbs `impersonate`, `bind`, and `escalate` may all potentially lead to + privilege escalation and should be tightly controlled. This policy prevents + use of these verbs in Role or ClusterRole resources. 
+spec: + validationFailureAction: Audit + background: true + rules: + - name: escalate + match: + any: + - resources: + kinds: + - Role + - ClusterRole + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: apiGroups + expression: "['*', 'rbac.authorization.k8s.io']" + - name: resources + expression: "['*', 'clusterroles', 'roles']" + - name: verbs + expression: "['*', 'bind', 'escalate', 'impersonate']" + expressions: + - expression: >- + object.rules == null || + !object.rules.exists(rule, + rule.apiGroups.exists(apiGroup, apiGroup in variables.apiGroups) && + rule.resources.exists(resource, resource in variables.resources) && + rule.verbs.exists(verb, verb in variables.verbs)) + message: "Use of verbs `escalate`, `bind`, and `impersonate` are forbidden." + + +``` diff --git a/content/en/policies/other-cel/restrict-ingress-classes/restrict-ingress-classes.md b/content/en/policies/other-cel/restrict-ingress-classes/restrict-ingress-classes.md new file mode 100644 index 000000000..e661b5088 --- /dev/null +++ b/content/en/policies/other-cel/restrict-ingress-classes/restrict-ingress-classes.md @@ -0,0 +1,56 @@ +--- +title: "Restrict Ingress Classes in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Ingress +policyType: "validate" +description: > + Only Ingress classes which match Ingress controllers deployed in the cluster should be allowed. Allowing users to define classes which cannot be satisfied by a deployed Ingress controller can result in either no or undesired functionality. This policy checks Ingress resources and only allows those which define `HAProxy` or `nginx` in the respective annotation. This annotation has largely been replaced as of Kubernetes 1.18 with the IngressClass resource. +--- + +## Policy Definition +/other-cel/restrict-ingress-classes/restrict-ingress-classes.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-ingress-classes + annotations: + policies.kyverno.io/title: Restrict Ingress Classes in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Ingress + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Only Ingress classes which match Ingress controllers deployed in the cluster + should be allowed. Allowing users to define classes which cannot be satisfied by a deployed + Ingress controller can result in either no or undesired functionality. This policy checks + Ingress resources and only allows those which define `HAProxy` or `nginx` in the respective + annotation. This annotation has largely been replaced as of Kubernetes 1.18 with the IngressClass + resource. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-ingress + match: + any: + - resources: + kinds: + - Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + has(object.metadata.annotations) && + 'kubernetes.io/ingress.class' in object.metadata.annotations && + object.metadata.annotations['kubernetes.io/ingress.class'] in ['HAProxy', 'nginx'] + message: "Unknown ingress class."
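+# Illustrative sketch only, not part of the policy: an Ingress (hypothetical
+# names) that the rule above would allow, since the annotation value "nginx"
+# is in the permitted list.
+#
+# apiVersion: networking.k8s.io/v1
+# kind: Ingress
+# metadata:
+#   name: example-ingress
+#   annotations:
+#     kubernetes.io/ingress.class: nginx
+# spec:
+#   rules:
+#   - host: example.com
+#     http:
+#       paths:
+#       - path: /
+#         pathType: Prefix
+#         backend:
+#           service:
+#             name: example-svc
+#             port:
+#               number: 80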
+ + +``` diff --git a/content/en/policies/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.md b/content/en/policies/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.md new file mode 100644 index 000000000..6e002d7c3 --- /dev/null +++ b/content/en/policies/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.md @@ -0,0 +1,54 @@ +--- +title: "Restrict Ingress defaultBackend in CEL expressions" +category: Best Practices in CEL +version: 1.11.0 +subject: Ingress +policyType: "validate" +description: > + An Ingress with no rules sends all traffic to a single default backend. The defaultBackend is conventionally a configuration option of the Ingress controller and is not specified in your Ingress resources. If none of the hosts or paths match the HTTP request in the Ingress objects, the traffic is routed to your default backend. In a multi-tenant environment, users should use explicit hosts; they should not be able to override the global default backend service. This policy prohibits the use of the defaultBackend field. +--- + +## Policy Definition +/other-cel/restrict-ingress-defaultbackend/restrict-ingress-defaultbackend.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-ingress-defaultbackend + annotations: + policies.kyverno.io/title: Restrict Ingress defaultBackend in CEL expressions + policies.kyverno.io/category: Best Practices in CEL + policies.kyverno.io/severity: high + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Ingress + policies.kyverno.io/description: >- + An Ingress with no rules sends all traffic to a single default backend. The defaultBackend + is conventionally a configuration option of the Ingress controller and is not specified in + your Ingress resources. If none of the hosts or paths match the HTTP request in the Ingress + objects, the traffic is routed to your default backend. In a multi-tenant environment, + users should use explicit hosts; they should not be able to override the global default backend + service. This policy prohibits the use of the defaultBackend field. +spec: + validationFailureAction: Audit + background: true + rules: + - name: restrict-ingress-defaultbackend + match: + any: + - resources: + kinds: + - Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.spec.defaultBackend)" + message: Setting the defaultBackend field is prohibited. + + +``` diff --git a/content/en/policies/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.md b/content/en/policies/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.md new file mode 100644 index 000000000..1098cff8f --- /dev/null +++ b/content/en/policies/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.md @@ -0,0 +1,53 @@ +--- +title: "Restrict Ingress Host with Wildcards in CEL expressions" +category: Other in CEL +version: 1.11.0 +subject: Ingress +policyType: "validate" +description: > + Ingress hosts optionally accept a wildcard as an alternative to precise matching. In some cases, this may be too permissive as it would direct unintended traffic to the given Ingress resource. This policy enforces that any Ingress host does not contain a wildcard character.
+--- + +## Policy Definition +/other-cel/restrict-ingress-wildcard/restrict-ingress-wildcard.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-ingress-wildcard + annotations: + policies.kyverno.io/title: Restrict Ingress Host with Wildcards in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Ingress + policies.kyverno.io/description: >- + Ingress hosts optionally accept a wildcard as an alternative + to precise matching. In some cases, this may be too permissive as it + would direct unintended traffic to the given Ingress resource. This + policy enforces that any Ingress host does not contain a wildcard + character. +spec: + validationFailureAction: Audit + background: true + rules: + - name: block-ingress-wildcard + match: + any: + - resources: + kinds: + - Ingress + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.spec.rules) || !object.spec.rules.exists(rule, has(rule.host) && rule.host.contains('*'))" + message: "Wildcards are not permitted as hosts." + + +``` diff --git a/content/en/policies/other-cel/restrict-jobs/restrict-jobs.md b/content/en/policies/other-cel/restrict-jobs/restrict-jobs.md new file mode 100644 index 000000000..4e202d4aa --- /dev/null +++ b/content/en/policies/other-cel/restrict-jobs/restrict-jobs.md @@ -0,0 +1,49 @@ +--- +title: "Restrict Jobs in CEL expressions" +category: Other in CEL +version: +subject: Job +policyType: "validate" +description: > + Jobs can be created directly, or indirectly via a CronJob controller. In some cases, users may want to only allow Jobs if they are created via a CronJob. This policy restricts Jobs so they may only be created by a CronJob. +--- + +## Policy Definition +/other-cel/restrict-jobs/restrict-jobs.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-jobs + annotations: + policies.kyverno.io/title: Restrict Jobs in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Job + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Jobs can be created directly, or indirectly via a CronJob controller. + In some cases, users may want to only allow Jobs if they are created via a CronJob. + This policy restricts Jobs so they may only be created by a CronJob. +spec: + validationFailureAction: Enforce + rules: + - name: restrict-job-from-cronjob + match: + any: + - resources: + kinds: + - Job + celPreconditions: + - name: "not-created-by-cronjob" + expression: "!has(object.metadata.ownerReferences) || object.metadata.ownerReferences[0].kind != 'CronJob'" + validate: + cel: + expressions: + - expression: "false" + message: Jobs are only allowed if spawned from CronJobs.
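+# Illustrative note, not part of the policy: a Job created directly carries no
+# ownerReferences, so the precondition evaluates true and the Job is denied.
+# A Job spawned by a CronJob carries an ownerReference like the (hypothetical)
+# fragment below, so the precondition evaluates false and the rule is skipped.
+#
+# metadata:
+#   ownerReferences:
+#   - apiVersion: batch/v1
+#     kind: CronJob            # ownerReferences[0].kind == 'CronJob'
+#     name: example-cronjob
+#     controller: true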
+ + +``` diff --git a/content/en/policies/other-cel/restrict-loadbalancer/restrict-loadbalancer.md b/content/en/policies/other-cel/restrict-loadbalancer/restrict-loadbalancer.md new file mode 100644 index 000000000..d4cd3c741 --- /dev/null +++ b/content/en/policies/other-cel/restrict-loadbalancer/restrict-loadbalancer.md @@ -0,0 +1,52 @@ +--- +title: "Disallow Service Type LoadBalancer in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Service +policyType: "validate" +description: > + Especially in cloud provider environments, a Service having type LoadBalancer will cause the provider to respond by creating a load balancer somewhere in the customer account. This adds cost and complexity to a deployment. Without restricting this ability, users may easily overrun established budgets and security practices set by the organization. This policy restricts use of the Service type LoadBalancer. +--- + +## Policy Definition +/other-cel/restrict-loadbalancer/restrict-loadbalancer.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: no-loadbalancer-service + annotations: + policies.kyverno.io/title: Disallow Service Type LoadBalancer in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Service + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Especially in cloud provider environments, a Service having type LoadBalancer will cause the + provider to respond by creating a load balancer somewhere in the customer account. This adds + cost and complexity to a deployment. Without restricting this ability, users may easily + overrun established budgets and security practices set by the organization. This policy restricts + use of the Service type LoadBalancer. +spec: + validationFailureAction: Audit + background: true + rules: + - name: no-LoadBalancer + match: + any: + - resources: + kinds: + - Service + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.type != 'LoadBalancer'" + message: "Service of type LoadBalancer is not allowed." + + +``` diff --git a/content/en/policies/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.md b/content/en/policies/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.md new file mode 100644 index 000000000..bb024de68 --- /dev/null +++ b/content/en/policies/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.md @@ -0,0 +1,58 @@ +--- +title: "Restrict NetworkPolicy with Empty podSelector in CEL expressions" +category: Other, Multi-Tenancy in CEL +version: 1.11.0 +subject: NetworkPolicy +policyType: "validate" +description: > + By default, all pods in a Kubernetes cluster are allowed to communicate with each other, and all network traffic is unencrypted. It is recommended to not use an empty podSelector in order to more closely control the necessary traffic flows. This policy requires that all NetworkPolicies other than that of `default-deny` not use an empty podSelector. 
+--- + +## Policy Definition +/other-cel/restrict-networkpolicy-empty-podselector/restrict-networkpolicy-empty-podselector.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-networkpolicy-empty-podselector + annotations: + policies.kyverno.io/title: Restrict NetworkPolicy with Empty podSelector in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: NetworkPolicy + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + By default, all pods in a Kubernetes cluster are allowed to communicate with each other, and all + network traffic is unencrypted. It is recommended to not use an empty podSelector in order to + more closely control the necessary traffic flows. This policy requires that all NetworkPolicies + other than that of `default-deny` not use an empty podSelector. +spec: + validationFailureAction: Audit + background: true + rules: + - name: empty-podselector + match: + any: + - resources: + kinds: + - NetworkPolicy + operations: + - CREATE + - UPDATE + exclude: + any: + - resources: + kinds: + - NetworkPolicy + names: + - default-deny + validate: + cel: + expressions: + - expression: "size(object.spec.podSelector) != 0" + message: "NetworkPolicies must not use an empty podSelector." + + +``` diff --git a/content/en/policies/other-cel/restrict-node-affinity/restrict-node-affinity.md b/content/en/policies/other-cel/restrict-node-affinity/restrict-node-affinity.md new file mode 100644 index 000000000..126802bbc --- /dev/null +++ b/content/en/policies/other-cel/restrict-node-affinity/restrict-node-affinity.md @@ -0,0 +1,52 @@ +--- +title: "Restrict Node Affinity in CEL expressions" +category: Other in CEL +version: +subject: Pod +policyType: "validate" +description: > + Pods may use several mechanisms to prefer scheduling on a set of nodes, and nodeAffinity is one of them. nodeAffinity uses expressions to select eligible nodes for scheduling decisions and may override intended placement options by cluster administrators. This policy ensures that nodeAffinity is not used in a Pod spec. +--- + +## Policy Definition +/other-cel/restrict-node-affinity/restrict-node-affinity.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-node-affinity + annotations: + policies.kyverno.io/title: Restrict Node Affinity in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Pods may use several mechanisms to prefer scheduling on a set of nodes, + and nodeAffinity is one of them. nodeAffinity uses expressions to select + eligible nodes for scheduling decisions and may override intended placement + options by cluster administrators. This policy ensures that nodeAffinity + is not used in a Pod spec. +spec: + background: true + validationFailureAction: Audit + rules: + - name: check-nodeaffinity + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "!has(object.spec.affinity) || !has(object.spec.affinity.nodeAffinity)" + message: "Node affinity cannot be used." 
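+# Illustrative sketch only, not part of the policy: a Pod spec fragment
+# (hypothetical names) that this rule would flag, because it sets
+# spec.affinity.nodeAffinity.
+#
+# spec:
+#   affinity:
+#     nodeAffinity:
+#       requiredDuringSchedulingIgnoredDuringExecution:
+#         nodeSelectorTerms:
+#         - matchExpressions:
+#           - key: kubernetes.io/os    # well-known node label
+#             operator: In
+#             values: ["linux"]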
+ + +``` diff --git a/content/en/policies/other-cel/restrict-node-label-creation/restrict-node-label-creation.md b/content/en/policies/other-cel/restrict-node-label-creation/restrict-node-label-creation.md new file mode 100644 index 000000000..d1812d49e --- /dev/null +++ b/content/en/policies/other-cel/restrict-node-label-creation/restrict-node-label-creation.md @@ -0,0 +1,56 @@ +--- +title: "Restrict node label creation in CEL expressions" +category: Sample in CEL +version: +subject: Node, Label +policyType: "validate" +description: > + Node labels are critical pieces of metadata upon which many other applications and logic may depend and should not be altered or removed by regular users. Many cloud providers also use Node labels to signal specific functions to applications. This policy prevents setting of a new label called `foo` on cluster Nodes. Use of this policy requires removal of the Node resource filter in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy requires, at minimum, one of the following versions of Kubernetes: v1.18.18, v1.19.10, v1.20.6, or v1.21.0. +--- + +## Policy Definition +/other-cel/restrict-node-label-creation/restrict-node-label-creation.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-node-label-creation + annotations: + policies.kyverno.io/title: Restrict node label creation in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/subject: Node, Label + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Node labels are critical pieces of metadata upon which many other applications and + logic may depend and should not be altered or removed by regular users. Many cloud + providers also use Node labels to signal specific functions to applications. + This policy prevents setting of a new label called `foo` on + cluster Nodes. Use of this policy requires removal of the Node resource filter + in the Kyverno ConfigMap ([Node,*,*]). Due to Kubernetes CVE-2021-25735, this policy + requires, at minimum, one of the following versions of Kubernetes: + v1.18.18, v1.19.10, v1.20.6, or v1.21.0. +spec: + validationFailureAction: Enforce + background: false + rules: + - name: prevent-label-set + match: + any: + - resources: + kinds: + - Node + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + - name: "has-foo-label" + expression: "has(object.metadata.labels) && 'foo' in object.metadata.labels" + validate: + cel: + expressions: + - expression: "false" + message: "Setting the `foo` label on a Node is not allowed." + + +``` diff --git a/content/en/policies/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.md b/content/en/policies/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.md new file mode 100644 index 000000000..758924c19 --- /dev/null +++ b/content/en/policies/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.md @@ -0,0 +1,75 @@ +--- +title: "Restrict Pod Controller ServiceAccount Updates in CEL Expressions" +category: Other in CEL +version: +subject: Pod +policyType: "validate" +description: > + ServiceAccounts which have the ability to edit/patch workloads which they created may potentially use that privilege to update to a different ServiceAccount with higher privileges. 
This policy, intended to be run in `enforce` mode, blocks updates to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods directly for this field are not possible as it is immutable once set. +--- + +## Policy Definition +/other-cel/restrict-pod-controller-serviceaccount-updates/restrict-pod-controller-serviceaccount-updates.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-pod-controller-serviceaccount-updates + annotations: + policies.kyverno.io/title: Restrict Pod Controller ServiceAccount Updates in CEL Expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + kyverno.io/kyverno-version: 1.12.1 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + ServiceAccounts which have the ability to edit/patch workloads which they created + may potentially use that privilege to update to a different ServiceAccount with higher + privileges. This policy, intended to be run in `enforce` mode, blocks updates + to Pod controllers if those updates modify the serviceAccountName field. Updates to Pods + directly for this field are not possible as it is immutable once set. +spec: + validationFailureAction: Audit + background: true + rules: + - name: block-serviceaccount-updates + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: >- + object.spec.template.spec.?serviceAccountName.orValue('empty') == oldObject.spec.template.spec.?serviceAccountName.orValue('empty') + message: >- + The serviceAccountName field may not be changed once created. + - name: block-serviceaccount-updates-cronjob + match: + any: + - resources: + kinds: + - CronJob + celPreconditions: + - name: "operation-should-be-update" + expression: "request.operation == 'UPDATE'" + validate: + cel: + expressions: + - expression: >- + object.spec.jobTemplate.spec.template.spec.?serviceAccountName.orValue('empty') == oldObject.spec.jobTemplate.spec.template.spec.?serviceAccountName.orValue('empty') + message: >- + The serviceAccountName field may not be changed once created. + + +``` diff --git a/content/en/policies/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.md b/content/en/policies/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.md new file mode 100644 index 000000000..adb93a9d8 --- /dev/null +++ b/content/en/policies/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.md @@ -0,0 +1,52 @@ +--- +title: "Restrict Auto-Mount of Service Account Tokens in Service Account in CEL expressions" +category: Security in CEL +version: +subject: Secret,ServiceAccount +policyType: "validate" +description: > + Kubernetes automatically mounts ServiceAccount credentials in each Pod. The ServiceAccount may be assigned roles allowing Pods to access API resources. Blocking this ability is an extension of the least privilege best practice and should be followed if Pods do not need to speak to the API server to function. This policy ensures that mounting of these ServiceAccount tokens is blocked.
+--- + +## Policy Definition +/other-cel/restrict-sa-automount-sa-token/restrict-sa-automount-sa-token.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-sa-automount-sa-token + annotations: + policies.kyverno.io/title: Restrict Auto-Mount of Service Account Tokens in Service Account in CEL expressions + policies.kyverno.io/category: Security in CEL + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Secret,ServiceAccount + policies.kyverno.io/description: >- + Kubernetes automatically mounts ServiceAccount credentials in each Pod. + The ServiceAccount may be assigned roles allowing Pods to access API resources. + Blocking this ability is an extension of the least privilege best practice and should + be followed if Pods do not need to speak to the API server to function. + This policy ensures that mounting of these ServiceAccount tokens is blocked. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-sa-automountServiceAccountToken + match: + any: + - resources: + kinds: + - ServiceAccount + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.automountServiceAccountToken) && object.automountServiceAccountToken == false" + message: "ServiceAccounts must set automountServiceAccountToken to false." + + +``` diff --git a/content/en/policies/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.md b/content/en/policies/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.md new file mode 100644 index 000000000..e91db5080 --- /dev/null +++ b/content/en/policies/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.md @@ -0,0 +1,61 @@ +--- +title: "Restrict Secret Verbs in Roles in CEL expressions" +category: Security in CEL +version: 1.11.0 +subject: Role, ClusterRole, RBAC +policyType: "validate" +description: > + The verbs `get`, `list`, and `watch` in a Role or ClusterRole, when paired with the Secrets resource, effectively allow Secrets to be read, which may expose sensitive information. This policy prevents a Role or ClusterRole from using these verbs in tandem with Secret resources. In order to fully implement this control, it is recommended to pair this policy with another which also prevents use of the wildcard ('*') in the verbs list either when explicitly naming Secrets or when also using a wildcard in the base API group. +--- + +## Policy Definition +/other-cel/restrict-secret-role-verbs/restrict-secret-role-verbs.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-secret-role-verbs + annotations: + policies.kyverno.io/title: Restrict Secret Verbs in Roles in CEL expressions + policies.kyverno.io/category: Security in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Role, ClusterRole, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + The verbs `get`, `list`, and `watch` in a Role or ClusterRole, when paired with the Secrets resource, effectively + allow Secrets to be read, which may expose sensitive information. This policy prevents + a Role or ClusterRole from using these verbs in tandem with Secret resources.
In order to + fully implement this control, it is recommended to pair this policy with another which + also prevents use of the wildcard ('*') in the verbs list either when explicitly naming Secrets + or when also using a wildcard in the base API group. +spec: + validationFailureAction: Audit + background: true + rules: + - name: secret-verbs + match: + any: + - resources: + kinds: + - Role + - ClusterRole + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: forbiddenVerbs + expression: "['get','list','watch']" + expressions: + - expression: >- + object.rules == null || + !object.rules.exists(rule, + 'secrets' in rule.resources && rule.verbs.exists(verb, verb in variables.forbiddenVerbs)) + message: "Requesting verbs `get`, `list`, or `watch` on Secrets is forbidden." + + +``` diff --git a/content/en/policies/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.md b/content/en/policies/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.md new file mode 100644 index 000000000..654f64b9a --- /dev/null +++ b/content/en/policies/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.md @@ -0,0 +1,95 @@ +--- +title: "Restrict Secrets by Name in CEL expressions" +category: Other in CEL +version: +subject: Pod, Secret +policyType: "validate" +description: > + Secrets often contain sensitive information and their access should be carefully controlled. Although Kubernetes RBAC can be effective at restricting them in several ways, it lacks the ability to use wildcards in resource names. This policy ensures that only Secrets beginning with the name `safe-` can be consumed by Pods. In order to work effectively, this policy needs to be paired with a separate policy or rule to require `automountServiceAccountToken=false` since this would otherwise result in a Secret being mounted. +--- + +## Policy Definition +/other-cel/restrict-secrets-by-name/restrict-secrets-by-name.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-secrets-by-name + annotations: + policies.kyverno.io/title: Restrict Secrets by Name in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/subject: Pod, Secret + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Secrets often contain sensitive information and their access should be carefully controlled. + Although Kubernetes RBAC can be effective at restricting them in several ways, + it lacks the ability to use wildcards in resource names. This policy ensures + that only Secrets beginning with the name `safe-` can be consumed by Pods. + In order to work effectively, this policy needs to be paired with a separate policy + or rule to require `automountServiceAccountToken=false` since this would otherwise + result in a Secret being mounted. +spec: + background: false + validationFailureAction: Audit + rules: + - name: safe-secrets-from-env + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? 
object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + variables.allContainers.all(container, + !has(container.env) || container.env.all(env, + !has(env.valueFrom) || !has(env.valueFrom.secretKeyRef) || env.valueFrom.secretKeyRef.name.startsWith("safe-"))) + message: "Only Secrets beginning with `safe-` may be consumed in env statements." + - name: safe-secrets-from-envfrom + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + variables: + - name: allContainers + expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))" + expressions: + - expression: >- + variables.allContainers.all(container, + !has(container.envFrom) || container.envFrom.all(env, + !has(env.secretRef) || env.secretRef.name.startsWith("safe-"))) + message: "Only Secrets beginning with `safe-` may be consumed in envFrom statements." + - name: safe-secrets-from-volumes + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: >- + !has(object.spec.volumes) || object.spec.volumes.all(volume, + !has(volume.secret) || volume.secret.secretName.startsWith("safe-")) + message: "Only Secrets beginning with `safe-` may be consumed in volumes." + + +``` diff --git a/content/en/policies/other-cel/restrict-service-port-range/restrict-service-port-range.md b/content/en/policies/other-cel/restrict-service-port-range/restrict-service-port-range.md new file mode 100644 index 000000000..115b087e3 --- /dev/null +++ b/content/en/policies/other-cel/restrict-service-port-range/restrict-service-port-range.md @@ -0,0 +1,52 @@ +--- +title: "Restrict Service Port Range in CEL expressions" +category: Other in CEL +version: 1.11.0 +subject: Service +policyType: "validate" +description: > + Services which are allowed to expose any port number may be able to impact other applications running on the Node which require them, or may make specifying security policy externally more challenging. This policy enforces that only the port range 32000 to 33000 may be used for Service resources. +--- + +## Policy Definition +/other-cel/restrict-service-port-range/restrict-service-port-range.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-service-port-range + annotations: + policies.kyverno.io/title: Restrict Service Port Range in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Service + policies.kyverno.io/description: >- + Services which are allowed to expose any port number may be able + to impact other applications running on the Node which require them, + or may make specifying security policy externally more challenging. + This policy enforces that only the port range 32000 to 33000 may + be used for Service resources. 
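# For example (illustrative, not part of the policy): a Service exposing
# port 8080 would be flagged by the rule below, while one using port 32500
# would pass, since the allowed range 32000-33000 is inclusive on both ends.
#
#   apiVersion: v1
#   kind: Service
#   metadata:
#     name: example-svc        # hypothetical name
#   spec:
#     selector:
#       app: example
#     ports:
#     - port: 8080             # outside 32000-33000, so it fails the check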
+spec: + validationFailureAction: Audit + rules: + - name: restrict-port-range + match: + any: + - resources: + kinds: + - Service + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.spec.ports.all(p, p.port >= 32000 && p.port <= 33000)" + message: Ports must be between 32000-33000 + + +``` diff --git a/content/en/policies/other-cel/restrict-storageclass/restrict-storageclass.md b/content/en/policies/other-cel/restrict-storageclass/restrict-storageclass.md new file mode 100644 index 000000000..284fb0edf --- /dev/null +++ b/content/en/policies/other-cel/restrict-storageclass/restrict-storageclass.md @@ -0,0 +1,53 @@ +--- +title: "Restrict StorageClass in CEL expressions" +category: Other, Multi-Tenancy in CEL +version: +subject: StorageClass +policyType: "validate" +description: > + StorageClasses allow description of custom "classes" of storage offered by the cluster, based on quality-of-service levels, backup policies, or custom policies determined by the cluster administrators. For shared StorageClasses in a multi-tenancy environment, a reclaimPolicy of `Delete` should be used to ensure a PersistentVolume cannot be reused across Namespaces. This policy requires StorageClasses set a reclaimPolicy of `Delete`. +--- + +## Policy Definition +/other-cel/restrict-storageclass/restrict-storageclass.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-storageclass + annotations: + policies.kyverno.io/title: Restrict StorageClass in CEL expressions + policies.kyverno.io/category: Other, Multi-Tenancy in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: StorageClass + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + StorageClasses allow description of custom "classes" of storage offered + by the cluster, based on quality-of-service levels, backup policies, or + custom policies determined by the cluster administrators. For shared StorageClasses + in a multi-tenancy environment, a reclaimPolicy of `Delete` should be used to ensure + a PersistentVolume cannot be reused across Namespaces. This policy requires + StorageClasses set a reclaimPolicy of `Delete`. +spec: + validationFailureAction: Audit + background: true + rules: + - name: storageclass-delete + match: + any: + - resources: + kinds: + - StorageClass + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.reclaimPolicy == 'Delete'" + message: "StorageClass must define a reclaimPolicy of Delete." + + +``` diff --git a/content/en/policies/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.md b/content/en/policies/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.md new file mode 100644 index 000000000..f3cb869c5 --- /dev/null +++ b/content/en/policies/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.md @@ -0,0 +1,56 @@ +--- +title: "Validate User ID, Group ID, and FS Group in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Pod +policyType: "validate" +description: > + All processes inside a Pod can be made to run with specific user and groupID by setting `runAsUser` and `runAsGroup` respectively. `fsGroup` can be specified to make sure any file created in the volume will have the specified groupID. This policy validates that these fields are set to the defined values. 
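For instance, a Pod whose pod-level securityContext matches all three expected IDs would pass the checks below (names and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod            # hypothetical name
spec:
  securityContext:
    runAsUser: 1000            # must equal 1000 per the policy
    runAsGroup: 3000           # must equal 3000
    fsGroup: 2000              # must equal 2000
  containers:
  - name: app
    image: nginx               # hypothetical image
```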
+--- + +## Policy Definition +/other-cel/restrict-usergroup-fsgroup-id/restrict-usergroup-fsgroup-id.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: validate-userid-groupid-fsgroup + annotations: + policies.kyverno.io/title: Validate User ID, Group ID, and FS Group in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + All processes inside a Pod can be made to run with specific user and groupID + by setting `runAsUser` and `runAsGroup` respectively. `fsGroup` can be specified + to make sure any file created in the volume will have the specified groupID. + This policy validates that these fields are set to the defined values. +spec: + validationFailureAction: Audit + background: true + rules: + - name: validate-userid-groupid-fsgroup + match: + any: + - resources: + kinds: + - Pod + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "has(object.spec.securityContext.runAsUser) && object.spec.securityContext.runAsUser == 1000" + message: "User ID should be 1000." + - expression: "has(object.spec.securityContext.runAsGroup) && object.spec.securityContext.runAsGroup == 3000" + message: "Group ID should be 3000." + - expression: "has(object.spec.securityContext.fsGroup) && object.spec.securityContext.fsGroup == 2000" + message: "fs Group should be 2000." + + +``` diff --git a/content/en/policies/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.md b/content/en/policies/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.md new file mode 100644 index 000000000..82dbe17c7 --- /dev/null +++ b/content/en/policies/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.md @@ -0,0 +1,54 @@ +--- +title: "Restrict Wildcards in Resources in CEL expressions" +category: Security, EKS Best Practices in CEL +version: 1.11.0 +subject: ClusterRole, Role, RBAC +policyType: "validate" +description: > + Wildcards ('*') in resources grant access to all of the resources referenced by the given API group and do not follow the principle of least privilege. As much as possible, avoid such open resources unless scoped to perhaps a custom API group. This policy blocks any Role or ClusterRole that contains a wildcard entry in the resources list found in any rule. +--- + +## Policy Definition +/other-cel/restrict-wildcard-resources/restrict-wildcard-resources.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-wildcard-resources + annotations: + policies.kyverno.io/title: Restrict Wildcards in Resources in CEL expressions + policies.kyverno.io/category: Security, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: ClusterRole, Role, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Wildcards ('*') in resources grant access to all of the resources referenced by + the given API group and do not follow the principle of least privilege. As much as possible, + avoid such open resources unless scoped to perhaps a custom API group. + This policy blocks any Role or ClusterRole that contains a wildcard entry in + the resources list found in any rule.
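# For example (illustrative only): a ClusterRole like the following would be
# flagged by the rule below, because one of its rules uses a wildcard in the
# resources list.
#
#   apiVersion: rbac.authorization.k8s.io/v1
#   kind: ClusterRole
#   metadata:
#     name: overly-broad       # hypothetical name
#   rules:
#   - apiGroups: ["apps"]
#     resources: ["*"]         # the '*' entry triggers the policy
#     verbs: ["get"]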
+spec: + validationFailureAction: Audit + background: true + rules: + - name: wildcard-resources + match: + any: + - resources: + kinds: + - Role + - ClusterRole + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.rules == null || !object.rules.exists(rule, '*' in rule.resources)" + message: "Use of a wildcard ('*') in any resources is forbidden." + + +``` diff --git a/content/en/policies/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.md b/content/en/policies/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.md new file mode 100644 index 000000000..69e0e6469 --- /dev/null +++ b/content/en/policies/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.md @@ -0,0 +1,54 @@ +--- +title: "Restrict Wildcard in Verbs in CEL expressions" +category: Security, EKS Best Practices in CEL +version: 1.11.0 +subject: Role, ClusterRole, RBAC +policyType: "validate" +description: > + Wildcards ('*') in verbs grant all access to the resources referenced by them and do not follow the principle of least privilege. As much as possible, avoid such open verbs unless scoped to perhaps a custom API group. This policy blocks any Role or ClusterRole that contains a wildcard entry in the verbs list found in any rule. +--- + +## Policy Definition +/other-cel/restrict-wildcard-verbs/restrict-wildcard-verbs.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: restrict-wildcard-verbs + annotations: + policies.kyverno.io/title: Restrict Wildcard in Verbs in CEL expressions + policies.kyverno.io/category: Security, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Role, ClusterRole, RBAC + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Wildcards ('*') in verbs grant all access to the resources referenced by them and + do not follow the principle of least privilege. As much as possible, + avoid such open verbs unless scoped to perhaps a custom API group. + This policy blocks any Role or ClusterRole that contains a wildcard entry in + the verbs list found in any rule. +spec: + validationFailureAction: Audit + background: true + rules: + - name: wildcard-verbs + match: + any: + - resources: + kinds: + - Role + - ClusterRole + operations: + - CREATE + - UPDATE + validate: + cel: + expressions: + - expression: "object.rules == null || !object.rules.exists(rule, '*' in rule.verbs)" + message: "Use of a wildcard ('*') in any verbs is forbidden." + + +``` diff --git a/content/en/policies/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.md b/content/en/policies/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.md new file mode 100644 index 000000000..0357db6c5 --- /dev/null +++ b/content/en/policies/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.md @@ -0,0 +1,60 @@ +--- +title: "Spread Pods Across Nodes & Zones in CEL expressions" +category: Sample in CEL +version: 1.11.0 +subject: Deployment, StatefulSet +policyType: "validate" +description: > + Deployments to a Kubernetes cluster with multiple availability zones often need to distribute those replicas to align with those zones to ensure site-level failures do not impact availability. This policy ensures topologySpreadConstraints are defined, to spread pods over nodes and zones. Deployments or StatefulSets with fewer than 3 replicas are skipped.
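As an illustration, a Deployment carrying one constraint for each required topologyKey would satisfy the check (all names and the image are hypothetical):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-deploy              # hypothetical name
spec:
  replicas: 3                       # 3 or more, so the policy applies
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: kubernetes.io/hostname        # spread across nodes
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app: example
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone   # spread across zones
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app: example
      containers:
      - name: app
        image: nginx                # hypothetical image
```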
+--- + +## Policy Definition +/other-cel/topologyspreadconstraints-policy/topologyspreadconstraints-policy.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: topologyspreadconstraints-policy + annotations: + policies.kyverno.io/title: Spread Pods Across Nodes & Zones in CEL expressions + kyverno.io/kubernetes-version: "1.26-1.27" + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/description: >- + Deployments to a Kubernetes cluster with multiple availability zones often need to + distribute those replicas to align with those zones to ensure site-level failures + do not impact availability. This policy ensures topologySpreadConstraints are defined, + to spread pods over nodes and zones. Deployments or Statefulsets with less than 3 + replicas are skipped. + policies.kyverno.io/minversion: 1.11.0 + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Deployment, StatefulSet +spec: + background: true + failurePolicy: Ignore + validationFailureAction: Audit + rules: + - name: spread-pods + match: + any: + - resources: + kinds: + - Deployment + - StatefulSet + operations: + - CREATE + - UPDATE + celPreconditions: + - name: "replicas-must-be-3-or-more" + expression: "object.spec.replicas >= 3" + validate: + cel: + expressions: + - expression: >- + has(object.spec.template.spec.topologySpreadConstraints) && + size(object.spec.template.spec.topologySpreadConstraints.filter(t, t.topologyKey == 'kubernetes.io/hostname' || t.topologyKey == 'topology.kubernetes.io/zone')) == 2 + message: "topologySpreadConstraint for kubernetes.io/hostname & topology.kubernetes.io/zone are required" + + +``` diff --git a/content/en/policies/other/generate-networkpolicy-existing/generate-networkpolicy-existing.md b/content/en/policies/other/generate-networkpolicy-existing/generate-networkpolicy-existing.md index 87e1eadd9..8410ea4a2 100644 --- a/content/en/policies/other/generate-networkpolicy-existing/generate-networkpolicy-existing.md +++ b/content/en/policies/other/generate-networkpolicy-existing/generate-networkpolicy-existing.md @@ -31,7 +31,7 @@ metadata: is additional overhead. This policy creates a new NetworkPolicy for existing Namespaces which results in a default deny behavior and labels it with created-by=kyverno. spec: - generateExistingOnPolicyUpdate: true + generateExisting: true rules: - name: generate-existing-networkpolicy match: diff --git a/content/en/policies/other/mitigate-log4shell/mitigate-log4shell.md b/content/en/policies/other/mitigate-log4shell/mitigate-log4shell.md index ee4153b85..48f07a6c9 100644 --- a/content/en/policies/other/mitigate-log4shell/mitigate-log4shell.md +++ b/content/en/policies/other/mitigate-log4shell/mitigate-log4shell.md @@ -5,7 +5,7 @@ version: 1.6.0 subject: Pod policyType: "mutate" description: > - In response to CVE-2021-44228 referred to as Log4Shell, a RCE vulnerability in the Log4j library, a partial yet incomplete workaround for versions 2.10 to 2.14.1 of the library is to set the environment variable LOG4J_FORMAT_MSG_NO_LOOKUPS to "true". While this does provide some benefit by limiting exposure, there are still code paths which can exploit this vulnerability. It is highly recommended to upgrade log4j as soon as possible. See https://logging.apache.org/log4j/2.x/security.html for more details. This policy will mutate all initContainers and containers in an incoming Pod to add this environment variable automatically. 
+ In response to CVE-2021-44228 referred to as Log4Shell, a RCE vulnerability in the Log4j library, a partial yet incomplete workaround for versions 2.10 to 2.14.1 of the library is to set the environment variable LOG4J_FORMAT_MSG_NO_LOOKUPS to "true". While this does provide some benefit by limiting exposure, there are still code paths which can exploit this vulnerability. It is highly recommended to upgrade log4j as soon as possible. See https://logging.apache.org/security.html for more details. This policy will mutate all initContainers and containers in an incoming Pod to add this environment variable automatically. --- ## Policy Definition @@ -29,7 +29,7 @@ metadata: variable LOG4J_FORMAT_MSG_NO_LOOKUPS to "true". While this does provide some benefit by limiting exposure, there are still code paths which can exploit this vulnerability. It is highly recommended to upgrade log4j as soon as possible. - See https://logging.apache.org/log4j/2.x/security.html for more details. + See https://logging.apache.org/security.html for more details. This policy will mutate all initContainers and containers in an incoming Pod to add this environment variable automatically. spec: diff --git a/content/en/policies/other/resource-creation-updating-denied/resource-creation-updating-denied.md b/content/en/policies/other/resource-creation-updating-denied/resource-creation-updating-denied.md new file mode 100644 index 000000000..806293a7a --- /dev/null +++ b/content/en/policies/other/resource-creation-updating-denied/resource-creation-updating-denied.md @@ -0,0 +1,58 @@ +--- +title: "Deny Creation and Updating of Resources" +category: Other +version: 1.9.0 +subject: Pod +policyType: "validate" +description: > + This policy denies the creation and updating of resources specifically for Deployment and Pod kinds during a specified time window. The policy is designed to enhance control over resource modifications during critical periods, ensuring stability and consistency within the Kubernetes environment. +--- + +## Policy Definition +/other/resource-creation-updating-denied/resource-creation-updating-denied.yaml + +```yaml +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: resource-creation-updating-denied + annotations: + policies.kyverno.io/title: Deny Creation and Updating of Resources + policies.kyverno.io/category: Other + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.1 + policies.kyverno.io/minversion: 1.9.0 + kyverno.io/kubernetes-version: "1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + This policy denies the creation and updating of resources specifically for Deployment + and Pod kinds during a specified time window. The policy is designed to enhance control + over resource modifications during critical periods, ensuring stability and consistency + within the Kubernetes environment. +spec: + validationFailureAction: Audit + background: false + rules: + - name: deny-creation-updating-of-resources + match: + any: + - resources: + kinds: + - Deployment + preconditions: + all: + - key: '{{ time_now_utc().time_to_cron(@).split(@,'' '') | [1].to_number(@) }}' + operator: AnyIn + value: 8-10 + validate: + message: Creating and updating resources is not allowed at this time. 
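# A worked example of the precondition above (assuming the standard Kyverno
# JMESPath filters): if time_now_utc() returns "2024-10-15T09:30:00Z",
# time_to_cron() renders it as the cron string "30 9 15 10 2"; splitting on
# spaces, element [1] is the hour field ("9"), and to_number() yields 9,
# which falls in the 8-10 window (inclusive). The deny condition below
# therefore applies from 08:00 through 10:59 UTC.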
+ deny: + conditions: + all: + - key: '{{request.operation}}' + operator: AnyIn + value: + - CREATE + - UPDATE + +``` diff --git a/content/en/policies/other/restrict-wildcard-verbs/restrict-wildcard-verbs.md b/content/en/policies/other/restrict-wildcard-verbs/restrict-wildcard-verbs.md index b72fb2732..f8cab811f 100644 --- a/content/en/policies/other/restrict-wildcard-verbs/restrict-wildcard-verbs.md +++ b/content/en/policies/other/restrict-wildcard-verbs/restrict-wildcard-verbs.md @@ -46,7 +46,8 @@ spec: deny: conditions: any: - - key: "{{ contains(request.object.rules[].verbs[], '*') }}" + - key: "{{ contains(to_array(request.object.rules[].verbs[]), '*') }}" operator: Equals value: true + ``` diff --git a/content/en/policies/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.md b/content/en/policies/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.md index 3fdb92d47..ad9512538 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-capabilities/disallow-capabilities.md @@ -35,6 +35,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.md b/content/en/policies/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.md index 515a4e14f..baaf0931b 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-host-namespaces/disallow-host-namespaces.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-host-path/disallow-host-path.md b/content/en/policies/pod-security-cel/baseline/disallow-host-path/disallow-host-path.md index 4381fa200..384450490 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-host-path/disallow-host-path.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-host-path/disallow-host-path.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.md b/content/en/policies/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.md index 61758a749..eb1ccf662 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-host-ports-range/disallow-host-ports-range.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.md b/content/en/policies/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.md index 17c2ab27a..a0b71f864 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-host-ports/disallow-host-ports.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git 
a/content/en/policies/pod-security-cel/baseline/disallow-host-process/disallow-host-process.md b/content/en/policies/pod-security-cel/baseline/disallow-host-process/disallow-host-process.md index ee09dcf88..38b8683b5 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-host-process/disallow-host-process.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-host-process/disallow-host-process.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.md b/content/en/policies/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.md index 7864c0051..475d256d4 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-privileged-containers/disallow-privileged-containers.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.md b/content/en/policies/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.md index 5a4096508..536457ada 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-proc-mount/disallow-proc-mount.md @@ -39,6 +39,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/disallow-selinux/disallow-selinux.md b/content/en/policies/pod-security-cel/baseline/disallow-selinux/disallow-selinux.md index 891ba4a22..cdfa32589 100644 --- a/content/en/policies/pod-security-cel/baseline/disallow-selinux/disallow-selinux.md +++ b/content/en/policies/pod-security-cel/baseline/disallow-selinux/disallow-selinux.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: @@ -91,6 +94,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.md b/content/en/policies/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.md index 7bd8d8a67..dd85e0bf1 100644 --- a/content/en/policies/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.md +++ b/content/en/policies/pod-security-cel/baseline/restrict-seccomp/restrict-seccomp.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.md b/content/en/policies/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.md index 5ef91e0fc..47a68ccaa 100644 --- a/content/en/policies/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.md +++ b/content/en/policies/pod-security-cel/baseline/restrict-sysctls/restrict-sysctls.md @@ -41,6 +41,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.md b/content/en/policies/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.md 
index e324595ae..d315a8567 100644 --- a/content/en/policies/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.md +++ b/content/en/policies/pod-security-cel/restricted/disallow-capabilities-strict/disallow-capabilities-strict.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: message: >- Containers must drop `ALL` capabilities. @@ -67,6 +70,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.md b/content/en/policies/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.md index d81252f3b..5a546b099 100644 --- a/content/en/policies/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.md +++ b/content/en/policies/pod-security-cel/restricted/disallow-privilege-escalation/disallow-privilege-escalation.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.md b/content/en/policies/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.md index 69ac31885..11e4282ab 100644 --- a/content/en/policies/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.md +++ b/content/en/policies/pod-security-cel/restricted/require-run-as-non-root-user/require-run-as-non-root-user.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.md b/content/en/policies/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.md index 526b867af..b5c4628d8 100644 --- a/content/en/policies/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.md +++ b/content/en/policies/pod-security-cel/restricted/require-run-as-nonroot/require-run-as-nonroot.md @@ -37,6 +37,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.md b/content/en/policies/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.md index fe24b3ea9..2a33468a6 100644 --- a/content/en/policies/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.md +++ b/content/en/policies/pod-security-cel/restricted/restrict-seccomp-strict/restrict-seccomp-strict.md @@ -40,6 +40,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/policies/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.md b/content/en/policies/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.md index 7d3cba810..6058700e3 100644 --- a/content/en/policies/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.md +++ b/content/en/policies/pod-security-cel/restricted/restrict-volume-types/restrict-volume-types.md @@ -38,6 +38,9 @@ spec: - resources: kinds: - Pod + operations: + - CREATE + - UPDATE validate: cel: expressions: diff --git a/content/en/resources/_index.md b/content/en/resources/_index.md deleted file mode 100644 index 
c055aa91c..000000000 --- a/content/en/resources/_index.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Resources -linkTitle: "Resources" -type: docs ---- - -## Videos - -{{% videos %}} - -{{< youtube id="T9pXLmb61Vw" start="0" class="video" >}} - -{{< youtube id="wY6JIBt6Cfg" start="0" class="video" >}} - -{{< youtube id="M_-r6vUKevQ" start="0" class="video" >}} - -{{% /videos %}} - -{{% videos %}} - -{{< youtube id="rVfLoueuCoY" start="0" class="video" >}} - -{{< youtube id="UVaWIvXjyrA" start="0" class="video" >}} - -{{< youtube id="dHhgfyH5KRs" start="0" class="video" >}} - -{{% /videos %}} - -{{% videos %}} - -{{< youtube id="ZrbJB2KgFAE" start="0" class="video" >}} - -{{< youtube id="sxASDgKu0dQ" start="0" class="video" >}} - -{{< youtube id="Bo8KhWhNY6g" start="0" class="video" >}} - -{{% /videos %}} - - -{{% videos %}} - -{{< youtube id="Y3m6cYS3ytM" start="0" class="video" >}} - -{{< youtube id="AmJUFH7n33c" start="0" class="video" >}} - -{{< youtube id="k98BVmemp_0" start="0" class="video" >}} - -{{% /videos %}} - - -{{% videos %}} - -{{< youtube id="Am7mvIQWx4E" start="0" class="video" >}} - -{{< youtube id="9gSrRNmmKBc" start="0" class="video" >}} - -{{< youtube id="DREjzfTzNpA" start="0" class="video" >}} - - -{{% /videos %}} - -{{% videos %}} - -{{< youtube id="DW2u6LhNMh0" start="116" class="video" >}} - -{{< youtube id="ZE4Zu9WQET4" start="1621" class="video" >}} - -{{< youtube id="Mukbfbr2b_k" start="1053" class="video" >}} - -{{% /videos %}} - - -{{% videos %}} - -{{< youtube id="GlqCW7uJ-7Q" start="0034" class="video" >}} - -{{< youtube id="0cJAfmQ7Emg" start="0020" class="video" >}} - -{{< youtube id="ZrOtTELNLyg" start="633" class="video" >}} - -{{% /videos %}} - -{{% videos %}} - -{{< youtube id="T9pXLmb61Vw" start="0" class="video" >}} - -{{< youtube id="SYbh_TQHeOA" start="0" class="video" >}} - -{{% /videos %}} - -## Video Playlists - -- Kyverno training from Nirmata -- Kyverno use cases - -## Meet the maintainers - -{{% videos %}} - -{{< youtube id="i_pemoAGEc8" start="0" class="video" >}} - -{{< youtube id="XNYkNL9d9hk" start="0" class="video" >}} - -{{< youtube id="FKSFuPGbKeY" start="0" class="video" >}} - -{{% /videos %}} - -## Blogs - -(latest to oldest) - -- Why Kubernetes Policies are a Day-0 concern, November 2022 -- Kyverno 1.8: Native Pod Security, YAML signing, and More!, October, 2022 -- Reloading Secrets and ConfigMaps with Kyverno Policies, September, 2022 -- Protect the pipe! Secure CI/CD pipelines with a policy-based approach using Tekton and Kyverno Policies, August 2022 -- The 2 minute test for Kubernetes Pod Security, July 2022 -- Governing Multi-Tenant Kubernetes Clusters with Kyverno, July 2022 -- Kyverno v1.7.0 Mutate and generate existing resources is here, June 2022 -- Securing Kubernetes with Kyverno, January 2022 -- No! You shall not pass! 
Kyverno’s here!, December 2021 -- Admission Controller for Secure Supply Chain Verification - Kyverno, December 2021 -- Enforcing Policy as Code using Kyverno in Kubernetes, October 202 -- K8s policy with Kyverno, October 202 -- A Zero Trust Approach for Securing the Supply Chain of Microservices Packaged as Container Images, September 2021 -- Automate Your Security Practices and Policies on OpenShift With Kyverno, September 2021 -- Kyverno - A Kubernetes native policy manager (Policy as Code), September 2021 -- Simplify Kubernetes Cluster Management with Kyverno, September 2021 -- Understanding Kyverno Policies, August 2021 -- Secure provisioning of LoadBalancer Services on Kubernetes using Kyverno, July 2021 -- Policy for Kubernetes Custom Resources, June 2021 -- Mirroring environments with gitops and Kyverno, April 2021 -- Kubernetes Policy Comparison: OPA/Gatekeeper vs Kyverno, February 2021 -- Self-Service Velero Backups with Kyverno, January 2021 -- Kyverno: The Swiss Army Knife of Kubernetes, January 2021 -- Auto-labeling Kubernetes resources with Kyverno, December 2020 -- Easy as one-two-three policy management with Kyverno on Amazon EKS, December 2020 -- Kyverno, a New CNCF Sandbox Project, Offers Kubernetes-Native Policy Management, November 2020 -- Exploring Kyverno Series, November 2020 -- Policy-as-Code on Kubernetes with Kyverno, November 2020 -- Generate policies! Make life hassle free with Kyverno, July 2020 -- Deny Rules! Fine Grained Access Controls with Kyverno, July 2020 -- 10 Kubernetes Best Practices, November 2019 -- Introducing Kyverno, July 2019 - -## Presentations - -- Kyverno Workshop, Kubernetes Community Days Bengaluru, April 2022 -- Kubernetes Philly, October 2021 - Kubernetes Policy Management with Kyverno, October 2021 -- Cloud Native Live: SLSA with Cosign and Kyverno to secure software delivery, October 2021 -- KubeCon 2021 US - Kyverno Office Hours, October 2021 -- SecurityCon 2021, October 2021 -- Multi-Tenancy WG - Namespaces-as-a-Service with Kyverno and HNC, Jan 2021 -- CNCF Webinar - Keep Your Clusters Safe and Healthy, May 2020 -- VMware Code Meetup Video, April 2020 -- Virtual Rejekts 2020 EU Presentation, May 2020 diff --git a/go.mod b/go.mod index 64bc1fa5b..0b3f6ec69 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/kyverno/website go 1.19 require ( - github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2 // indirect - github.com/google/docsy v0.8.0 // indirect + github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7 // indirect + github.com/google/docsy v0.10.0 // indirect github.com/google/docsy/dependencies v0.7.2 // indirect - github.com/twbs/bootstrap v5.3.2+incompatible // indirect + github.com/twbs/bootstrap v5.3.3+incompatible // indirect ) diff --git a/go.sum b/go.sum index c1be65c1a..b6669c15e 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/FortAwesome/Font-Awesome v0.0.0-20210804190922-7d3d774145ac/go.mod h1 github.com/FortAwesome/Font-Awesome v0.0.0-20220831210243-d3a7818c253f/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2 h1:Uv1z5EqCfmiK4IHUwT0m3h/u/WCk+kpRfxvAZhpC7Gc= github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= +github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7 h1:2aWEKCRLqQ9nPyXaz4/IYtRrDr3PzEiX0DUSUr2/EDs= +github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7/go.mod 
h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= github.com/google/docsy v0.4.0 h1:Eyt2aiDC1fnw/Qq/9xnIqUU5n5Yyk4c8gX3nBDdTv/4= github.com/google/docsy v0.4.0/go.mod h1:vJjGkHNaw9bO42gpFTWwAUzHZWZEVlK46Kx7ikY5c7Y= github.com/google/docsy v0.6.0 h1:43bVF18t2JihAamelQjjGzx1vO2ljCilVrBgetCA8oI= @@ -12,6 +14,8 @@ github.com/google/docsy v0.7.1 h1:DUriA7Nr3lJjNi9Ulev1SfiG1sUYmvyDeU4nTp7uDxY= github.com/google/docsy v0.7.1/go.mod h1:JCmE+c+izhE0Rvzv3y+AzHhz1KdwlA9Oj5YBMklJcfc= github.com/google/docsy v0.8.0 h1:RgHyKRTo8YwScMThrf01Ky2yCGpUS1hpkspwNv6szT4= github.com/google/docsy v0.8.0/go.mod h1:FqTNN2T7pWEGW8dc+v5hQ5VF29W5uaL00PQ1LdVw5F8= +github.com/google/docsy v0.10.0 h1:6tMDacPwAyRWNCfvsn/9qGOZDQ8b0aRzjRZvnZPY5dg= +github.com/google/docsy v0.10.0/go.mod h1:c0nIAqmRTOuJ01F85U/wJPQtc3Zj9N58Kea9bOT2AJc= github.com/google/docsy/dependencies v0.4.0 h1:FXwyjtuFfPIPBauU2t7uIAgS6VYfJf+OD5pzxGvkQsQ= github.com/google/docsy/dependencies v0.4.0/go.mod h1:2zZxHF+2qvkyXhLZtsbnqMotxMukJXLaf8fAZER48oo= github.com/google/docsy/dependencies v0.6.0 h1:BFXDCINbp8ZuUGl/mrHjMfhCg+b1YX+hVLAA5fGW7Pc= @@ -27,3 +31,5 @@ github.com/twbs/bootstrap v4.6.2+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspT github.com/twbs/bootstrap v5.2.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= github.com/twbs/bootstrap v5.3.2+incompatible h1:tuiO5acc6xnZUR77Sbi5aKWXxjYxbmsSbJwYrhAKoQQ= github.com/twbs/bootstrap v5.3.2+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= +github.com/twbs/bootstrap v5.3.3+incompatible h1:goFoqinzdHfkeegpFP7pvhbd0g+A3O2hbU3XCjuNrEQ= +github.com/twbs/bootstrap v5.3.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= diff --git a/layouts/_default/_markup/render-heading.html b/layouts/_default/_markup/render-heading.html new file mode 100644 index 000000000..d06f54eb3 --- /dev/null +++ b/layouts/_default/_markup/render-heading.html @@ -0,0 +1,6 @@ + + {{ .Text | safeHTML }} + + + + diff --git a/layouts/partials/footer.html b/layouts/partials/footer.html index 368624fd2..174610ca7 100644 --- a/layouts/partials/footer.html +++ b/layouts/partials/footer.html @@ -12,7 +12,7 @@

Created by - + Nirmata @@ -22,6 +22,7 @@ {{ with .Site.Params.copyright }}© {{ now.Year}} {{ .}} {{ T "footer_all_rights_reserved" }}{{ end }}

+ {{ define "footer-links-block" }} diff --git a/netlify.toml b/netlify.toml index 77fa86e0b..ea2573b16 100644 --- a/netlify.toml +++ b/netlify.toml @@ -45,4 +45,9 @@ status = 200 [[headers]] for = "/*" [headers.values] - X-Frame-Options = "SAMEORIGIN" \ No newline at end of file + X-Frame-Options = "SAMEORIGIN" + +[[headers]] + for = "/blog/index.xml" + [headers.values] + access-control-allow-origin = "*" diff --git a/render/render.go b/render/render.go index 94e0f6c54..674d37422 100644 --- a/render/render.go +++ b/render/render.go @@ -136,6 +136,11 @@ func render(git *gitInfo, outdir string) error { if err != nil { return fmt.Errorf("failed to clean directory %s: %v", outdir, err) } + + err = removeEmptyDirs(outdir) + if err != nil { + return fmt.Errorf("failed to remove empty directories in %s: %v", outdir, err) + } } for _, yamlFilePath := range yamls { @@ -205,45 +210,94 @@ func render(git *gitInfo, outdir string) error { return nil } -// deleteMarkdownFiles deletes all .md files except "_index.md" -func deleteMarkdownFiles(outdir string) error { - d, err := os.Open(outdir) +// removeEmptyDirs collects directories and deletes empty ones from deepest to shallowest +func removeEmptyDirs(dir string) error { + var dirs []string + + // First, traverse the directory tree to collect directories + err := filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + dirs = append(dirs, path) + } + return nil + }) if err != nil { return err } - defer func() { - if err := d.Close(); err != nil { + // Sort directories by depth (deepest directories first) + sort.Slice(dirs, func(i, j int) bool { + return len(dirs[i]) > len(dirs[j]) + }) + + // Attempt to delete directories, starting from the deepest + for _, path := range dirs { + empty, err := isEmptyDir(path) + if err != nil { + return err + } + if empty { if Verbose { - log.Printf("failed to close output dir %s: %v", outdir, err) + log.Printf("Removing empty directory: %s", path) + } + err := os.Remove(path) + if err != nil { + log.Printf("failed to remove directory %s: %v", path, err) } } - }() + } + + return nil +} - files, err := d.Readdir(-1) +// isEmptyDir checks if a directory is empty +func isEmptyDir(dirPath string) (bool, error) { + entries, err := os.ReadDir(dirPath) if err != nil { - return err + return false, err } + return len(entries) == 0, nil +} - if Verbose { - log.Printf("cleaning directory %s", outdir) - } +// deleteMarkdownFiles deletes all .md files except "_index.md" +func deleteMarkdownFiles(outdir string) error { + // Walk through the directory and its subdirectories + err := filepath.WalkDir(outdir, func(path string, d os.DirEntry, err error) error { + if err != nil { + return err + } - for _, file := range files { - if file.Mode().IsRegular() { - name := file.Name() - if filepath.Ext(name) == ".md" { - if filepath.Base(name) == "_index.md" { - continue - } + // Process only files + if d.IsDir() { + return nil + } - if err := os.Remove(name); err != nil { - if Verbose { - log.Printf("failed to delete file %s: %v", name, err) - } + name := d.Name() + if filepath.Ext(name) == ".md" { + // Skip _index.md files + if filepath.Base(name) == "_index.md" { + return nil + } + + if err := os.Remove(path); err != nil { + if Verbose { + log.Printf("failed to delete file %s: %v", path, err) } } } + + return nil + }) + + if err != nil { + return err + } + + if Verbose { + log.Printf("cleaned directory %s", outdir) } return nil diff --git a/static/images/kyverno-architecture.png
b/static/images/kyverno-architecture.png index 10c7d9c70..98cd3346a 100644 Binary files a/static/images/kyverno-architecture.png and b/static/images/kyverno-architecture.png differ diff --git a/static/images/kyverno_website_graphics.pptx b/static/images/kyverno_website_graphics.pptx index d7ff0d65e..023b6a28c 100644 Binary files a/static/images/kyverno_website_graphics.pptx and b/static/images/kyverno_website_graphics.pptx differ