Skip to content

Commit

Permalink
fix: reduce RBAC access granted to controller service account
Browse files Browse the repository at this point in the history
Reduce the access to the cluster given to the ServiceAccount
used by the controller.

This leverages the changes introduced by kubernetes-sigs/controller-runtime#1435
to allow controllers to reduce the scope of watches.

Signed-off-by: Flavio Castelli <fcastelli@suse.com>
  • Loading branch information
flavio committed Apr 13, 2023
1 parent 7584def commit 4394600
Show file tree
Hide file tree
Showing 5 changed files with 134 additions and 48 deletions.
90 changes: 53 additions & 37 deletions config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,52 +28,32 @@ rules:
- update
- watch
- apiGroups:
- apps
- policies.kubewarden.io
resources:
- deployments
- admissionpolicies
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
- policies.kubewarden.io
resources:
- deployments
- replicasets
- admissionpolicies/finalizers
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
- policies.kubewarden.io
resources:
- configmaps
- secrets
- services
- admissionpolicies/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- policies.kubewarden.io
resources:
- admissionpolicies
- clusteradmissionpolicies
verbs:
- delete
- get
Expand All @@ -82,40 +62,76 @@ rules:
- apiGroups:
- policies.kubewarden.io
resources:
- admissionpolicies/finalizers
- clusteradmissionpolicies/finalizers
verbs:
- update
- apiGroups:
- policies.kubewarden.io
resources:
- admissionpolicies/status
- clusteradmissionpolicies/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
name: manager-role
namespace: kubewarden
rules:
- apiGroups:
- policies.kubewarden.io
- apps
resources:
- clusteradmissionpolicies
- deployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- policies.kubewarden.io
- apps
resources:
- clusteradmissionpolicies/finalizers
- deployments
- replicasets
verbs:
- update
- get
- list
- watch
- apiGroups:
- policies.kubewarden.io
- apps
resources:
- clusteradmissionpolicies/status
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- secrets
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- policies.kubewarden.io
resources:
Expand Down
14 changes: 12 additions & 2 deletions controllers/admissionpolicy_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)

// Warning: this controller is deployed by a helm chart which has its own
// templated RBAC rules. The rules are kept in sync between what is generated by
// `make manifests` and the helm chart by hand.
//
// We need access to these resources inside of all the namespaces -> a ClusterRole
// is needed
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=admissionpolicies,verbs=get;list;watch;delete
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=admissionpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=admissionpolicies/finalizers,verbs=update
//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments,verbs=get;list;watch
//
// We need access to these resources only inside of the namespace where the
// controller is deployed. Here we assume it's being deployed inside of the
// `kubewarden` namespace, this has to be parametrized in the helm chart
//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch
//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets;deployments,verbs=get;list;watch

// AdmissionPolicyReconciler reconciles an AdmissionPolicy object
type AdmissionPolicyReconciler struct {
Expand Down
14 changes: 12 additions & 2 deletions controllers/clusteradmissionpolicy_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)

// Warning: this controller is deployed by a helm chart which has its own
// templated RBAC rules. The rules are kept in sync between what is generated by
// `make manifests` and the helm chart by hand.
//
// We need access to these resources inside of all the namespaces -> a ClusterRole
// is needed
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=clusteradmissionpolicies,verbs=get;list;watch;delete
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=clusteradmissionpolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=clusteradmissionpolicies/finalizers,verbs=update
//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments,verbs=get;list;watch
//
// We need access to these resources only inside of the namespace where the
// controller is deployed. Here we assume it's being deployed inside of the
// `kubewarden` namespace, this has to be parametrized in the helm chart
//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch
//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets;deployments,verbs=get;list;watch

// ClusterAdmissionPolicyReconciler reconciles a ClusterAdmissionPolicy object
type ClusterAdmissionPolicyReconciler struct {
Expand Down
19 changes: 13 additions & 6 deletions controllers/policyserver_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,20 @@ type PolicyServerReconciler struct {
Reconciler admission.Reconciler
}

//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=policyservers,verbs=get;list;watch;delete
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=policyservers/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=policyservers/finalizers,verbs=update
// Warning: this controller is deployed by a helm chart which has its own
// templated RBAC rules. The rules are kept in sync between what is generated by
// `make manifests` and the helm chart by hand.
//
// The following ought to be part of kubewarden-controller-manager-namespaced-role:
//+kubebuilder:rbac:groups=core,resources=secrets;services;configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;delete;update;patch
// We need access to these resources only inside of the namespace where the
// controller is deployed. Here we assume it's being deployed inside of the
// `kubewarden` namespace, this has to be parametrized in the helm chart
//+kubebuilder:rbac:namespace=kubewarden,groups=policies.kubewarden.io,resources=policyservers,verbs=get;list;watch;delete
//+kubebuilder:rbac:namespace=kubewarden,groups=policies.kubewarden.io,resources=policyservers/status,verbs=get;update;patch
//+kubebuilder:rbac:namespace=kubewarden,groups=policies.kubewarden.io,resources=policyservers/finalizers,verbs=update
//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=secrets;services;configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets,verbs=get;list;watch
//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch

func (r *PolicyServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var policyServer policiesv1.PolicyServer
Expand Down
45 changes: 44 additions & 1 deletion main.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package main
import (
"context"
"flag"
"fmt"
"os"
"time"

Expand All @@ -31,10 +32,12 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"

"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
Expand Down Expand Up @@ -124,6 +127,10 @@ func main() {
}()
}

namespaceSelector := cache.ObjectSelector{
Field: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace=%s", deploymentsNamespace)),
}

mgr, err := webhookwrapper.NewManager(
ctrl.Options{
Scheme: scheme,
Expand All @@ -133,7 +140,43 @@ func main() {
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "a4ddbf36.kubewarden.io",
ClientDisableCacheFor: []client.Object{&corev1.ConfigMap{}, &appsv1.Deployment{}},
// Warning: the manager creates a client, which then uses Watches to monitor
// certain resources. By default, the client is not going to be namespaced,
// it will be able to watch resources across the entire cluster. This is of
// course constrained by the RBAC rules applied to the ServiceAccount that
// runs the controller.
// **However**, even when accessing a resource inside of a specific Namespace,
// the default behaviour of the cache is to create a Watch that is not namespaced;
// hence it requires the privilege to access all the resources of that type inside
// of the cluster. That can cause runtime errors if the ServiceAccount lacks
// this privilege.
// For example, when we access a secret inside the `kubewarden`
// namespace, the cache will create a Watch against Secrets, which will require
// privileges to access ALL the secrets of the cluster.
//
// To be able to have stricter RBAC rules, we need to instruct the cache to
// only watch objects inside of the namespace where the controller is running.
// That applies ONLY to the namespaced resources that we know the controller
// is going to own inside of a specific namespace.
// For example, Secret resources are going to be defined by the controller
// only inside of the `kubewarden` namespace; hence their watch can be namespaced.
// On the other hand, AdmissionPolicy resources are namespaced, but the controller
// requires access to them across all the namespaces of the cluster; hence the
// cache must not be namespaced.
NewCache: cache.BuilderWithOptions(cache.Options{
SelectorsByObject: map[client.Object]cache.ObjectSelector{
&appsv1.ReplicaSet{}: namespaceSelector,
&corev1.Secret{}: namespaceSelector,
&corev1.Pod{}: namespaceSelector,
&corev1.Service{}: namespaceSelector,
},
}),
// These types of resources should never be cached because we need fresh
// data coming from the client. This is required to perform the rollout
// of the PolicyServer Deployment whenever a policy is added/changed/removed.
// Because of that, there's no need to scope these resources inside
// of the cache, like we did for Pods, Services,... right above.
ClientDisableCacheFor: []client.Object{&corev1.ConfigMap{}, &appsv1.Deployment{}},
},
setupLog,
environment.developmentMode,
Expand Down

0 comments on commit 4394600

Please sign in to comment.