From 43946004c0a284b3e5619312c0b1c819805d8c71 Mon Sep 17 00:00:00 2001
From: Flavio Castelli
Date: Thu, 13 Apr 2023 11:33:47 +0200
Subject: [PATCH] fix: reduce RBAC access granted to controller service account

Reduce the access to the cluster given to the ServiceAccount used by
the controller.

This leverages the changes introduced by
kubernetes-sigs/controller-runtime#1435, which allow controllers to
reduce the scope of their watches.

Signed-off-by: Flavio Castelli
---
 config/rbac/role.yaml                      | 90 +++++++++++--------
 controllers/admissionpolicy_controller.go  | 14 ++-
 .../clusteradmissionpolicy_controller.go   | 14 ++-
 controllers/policyserver_controller.go     | 19 ++--
 main.go                                    | 45 +++++++++-
 5 files changed, 134 insertions(+), 48 deletions(-)

diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index fe041947..08fc17a0 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -28,52 +28,32 @@ rules:
   - update
   - watch
 - apiGroups:
-  - apps
+  - policies.kubewarden.io
   resources:
-  - deployments
+  - admissionpolicies
   verbs:
-  - create
   - delete
   - get
   - list
-  - patch
-  - update
   - watch
 - apiGroups:
-  - apps
+  - policies.kubewarden.io
   resources:
-  - deployments
-  - replicasets
+  - admissionpolicies/finalizers
   verbs:
-  - get
-  - list
-  - watch
+  - update
 - apiGroups:
-  - ""
+  - policies.kubewarden.io
   resources:
-  - configmaps
-  - secrets
-  - services
+  - admissionpolicies/status
   verbs:
-  - create
-  - delete
   - get
-  - list
   - patch
   - update
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  verbs:
-  - get
-  - list
-  - watch
 - apiGroups:
   - policies.kubewarden.io
   resources:
-  - admissionpolicies
+  - clusteradmissionpolicies
   verbs:
   - delete
   - get
@@ -82,40 +62,76 @@ rules:
 - apiGroups:
   - policies.kubewarden.io
   resources:
-  - admissionpolicies/finalizers
+  - clusteradmissionpolicies/finalizers
   verbs:
   - update
 - apiGroups:
   - policies.kubewarden.io
   resources:
-  - admissionpolicies/status
+  - clusteradmissionpolicies/status
   verbs:
   - get
   - patch
   - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  name: manager-role
+  namespace: kubewarden
+rules:
 - apiGroups:
-  - policies.kubewarden.io
+  - apps
   resources:
-  - clusteradmissionpolicies
+  - deployments
   verbs:
+  - create
   - delete
   - get
   - list
+  - patch
+  - update
   - watch
 - apiGroups:
-  - policies.kubewarden.io
+  - apps
   resources:
-  - clusteradmissionpolicies/finalizers
+  - deployments
+  - replicasets
   verbs:
-  - update
+  - get
+  - list
+  - watch
 - apiGroups:
-  - policies.kubewarden.io
+  - apps
   resources:
-  - clusteradmissionpolicies/status
+  - replicasets
   verbs:
   - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - secrets
+  - services
+  verbs:
+  - create
+  - delete
+  - get
+  - list
   - patch
   - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - policies.kubewarden.io
   resources:
diff --git a/controllers/admissionpolicy_controller.go b/controllers/admissionpolicy_controller.go
index ee7c680b..8516629e 100644
--- a/controllers/admissionpolicy_controller.go
+++ b/controllers/admissionpolicy_controller.go
@@ -36,11 +36,21 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
+// Warning: this controller is deployed by a helm chart which has its own
+// templated RBAC rules. The rules are kept in sync between what is generated by
+// `make manifests` and the helm chart by hand.
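+//
+// `make manifests` renders the markers below that carry no `namespace`
+// parameter into ClusterRole rules, while the `namespace=kubewarden` markers
+// further down end up in the namespaced Role (see the role.yaml hunks above).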
+//
+// We need access to these resources inside of all the namespaces -> a ClusterRole
+// is needed
 //+kubebuilder:rbac:groups=policies.kubewarden.io,resources=admissionpolicies,verbs=get;list;watch;delete
 //+kubebuilder:rbac:groups=policies.kubewarden.io,resources=admissionpolicies/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=policies.kubewarden.io,resources=admissionpolicies/finalizers,verbs=update
-//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
-//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments,verbs=get;list;watch
+//
+// We need access to these resources only inside of the namespace where the
+// controller is deployed. Here we assume it's being deployed inside of the
+// `kubewarden` namespace; this has to be parametrized in the helm chart
+//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch
+//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets;deployments,verbs=get;list;watch
 
 // AdmissionPolicyReconciler reconciles an AdmissionPolicy object
 type AdmissionPolicyReconciler struct {
diff --git a/controllers/clusteradmissionpolicy_controller.go b/controllers/clusteradmissionpolicy_controller.go
index d6f76042..5b6c01fc 100644
--- a/controllers/clusteradmissionpolicy_controller.go
+++ b/controllers/clusteradmissionpolicy_controller.go
@@ -36,11 +36,21 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
+// Warning: this controller is deployed by a helm chart which has its own
+// templated RBAC rules. The rules are kept in sync between what is generated by
+// `make manifests` and the helm chart by hand.
+//
+// We need access to these resources inside of all the namespaces -> a ClusterRole
+// is needed
 //+kubebuilder:rbac:groups=policies.kubewarden.io,resources=clusteradmissionpolicies,verbs=get;list;watch;delete
 //+kubebuilder:rbac:groups=policies.kubewarden.io,resources=clusteradmissionpolicies/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=policies.kubewarden.io,resources=clusteradmissionpolicies/finalizers,verbs=update
-//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
-//+kubebuilder:rbac:groups=apps,resources=replicasets;deployments,verbs=get;list;watch
+//
+// We need access to these resources only inside of the namespace where the
+// controller is deployed. Here we assume it's being deployed inside of the
+// `kubewarden` namespace; this has to be parametrized in the helm chart
+//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch
+//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets;deployments,verbs=get;list;watch
 
 // ClusterAdmissionPolicyReconciler reconciles a ClusterAdmissionPolicy object
 type ClusterAdmissionPolicyReconciler struct {
diff --git a/controllers/policyserver_controller.go b/controllers/policyserver_controller.go
index 02e2a1e6..3c0fac7b 100644
--- a/controllers/policyserver_controller.go
+++ b/controllers/policyserver_controller.go
@@ -44,13 +44,20 @@ type PolicyServerReconciler struct {
 	Reconciler admission.Reconciler
 }
 
-//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=policyservers,verbs=get;list;watch;delete
-//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=policyservers/status,verbs=get;update;patch
-//+kubebuilder:rbac:groups=policies.kubewarden.io,resources=policyservers/finalizers,verbs=update
+// Warning: this controller is deployed by a helm chart which has its own
+// templated RBAC rules.
+// The rules are kept in sync between what is generated by `make manifests`
+// and the helm chart by hand.
 //
-// The following ought to be part of kubewarden-controller-manager-namespaced-role:
-//+kubebuilder:rbac:groups=core,resources=secrets;services;configmaps,verbs=get;list;watch;create;update;patch;delete
-//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;delete;update;patch
+// We need access to these resources only inside of the namespace where the
+// controller is deployed. Here we assume it's being deployed inside of the
+// `kubewarden` namespace; this has to be parametrized in the helm chart
+//+kubebuilder:rbac:namespace=kubewarden,groups=policies.kubewarden.io,resources=policyservers,verbs=get;list;watch;delete
+//+kubebuilder:rbac:namespace=kubewarden,groups=policies.kubewarden.io,resources=policyservers/status,verbs=get;update;patch
+//+kubebuilder:rbac:namespace=kubewarden,groups=policies.kubewarden.io,resources=policyservers/finalizers,verbs=update
+//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=secrets;services;configmaps,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets,verbs=get;list;watch
+//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch
 
 func (r *PolicyServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	var policyServer policiesv1.PolicyServer
diff --git a/main.go b/main.go
index 8f3636fd..074b63bd 100644
--- a/main.go
+++ b/main.go
@@ -19,6 +19,7 @@ package main
 import (
 	"context"
 	"flag"
+	"fmt"
 	"os"
 	"time"
 
@@ -31,10 +32,12 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
@@ -124,6 +127,10 @@ func main() {
 		}()
 	}
 
+	namespaceSelector := cache.ObjectSelector{
+		Field: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace=%s", deploymentsNamespace)),
+	}
+
 	mgr, err := webhookwrapper.NewManager(
 		ctrl.Options{
 			Scheme:                 scheme,
@@ -133,7 +140,43 @@ func main() {
 			HealthProbeBindAddress: probeAddr,
 			LeaderElection:         enableLeaderElection,
 			LeaderElectionID:       "a4ddbf36.kubewarden.io",
-			ClientDisableCacheFor:  []client.Object{&corev1.ConfigMap{}, &appsv1.Deployment{}},
+			// Warning: the manager creates a client, which then uses Watches to monitor
+			// certain resources. By default, the client is not namespaced: it is able to
+			// watch resources across the entire cluster. This is of course constrained
+			// by the RBAC rules applied to the ServiceAccount that runs the controller.
+			// **However**, even when accessing a resource inside of a specific Namespace,
+			// the default behaviour of the cache is to create a Watch that is not namespaced;
+			// hence it requires the privilege to access all the resources of that type
+			// inside of the cluster. That can cause a runtime error if the ServiceAccount
+			// lacks this privilege.
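+			// (The failure surfaces as a Forbidden error from the API server, along
+			// the lines of: `secrets is forbidden: User "system:serviceaccount:..."
+			// cannot list resource "secrets" in API group "" at the cluster scope`.)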
+			// For example, when we access a Secret inside the `kubewarden`
+			// namespace, the cache will create a Watch against Secrets, which requires
+			// the privilege to access ALL the Secrets of the cluster.
+			//
+			// To be able to have stricter RBAC rules, we need to instruct the cache to
+			// only watch objects inside of the namespace where the controller is running.
+			// That applies ONLY to the namespaced resources that we know the controller
+			// is going to own inside of a specific namespace.
+			// For example, Secret resources are going to be defined by the controller
+			// only inside of the `kubewarden` namespace; hence their watch can be namespaced.
+			// On the other hand, AdmissionPolicy resources are namespaced, but the controller
+			// needs to access them across all the namespaces of the cluster; hence the
+			// cache must not be namespaced.
+			NewCache: cache.BuilderWithOptions(cache.Options{
+				SelectorsByObject: map[client.Object]cache.ObjectSelector{
+					&appsv1.ReplicaSet{}: namespaceSelector,
+					&corev1.Secret{}:     namespaceSelector,
+					&corev1.Pod{}:        namespaceSelector,
+					&corev1.Service{}:    namespaceSelector,
+				},
+			}),
+			// These types of resources should never be cached because we need fresh
+			// data coming from the client. This is required to perform the rollout
+			// of the PolicyServer Deployment whenever a policy is added/changed/removed.
+			// Because of that, there's no need to scope these resources inside
+			// of the cache, as we did for Pods, Services, ... right above.
+			ClientDisableCacheFor: []client.Object{&corev1.ConfigMap{}, &appsv1.Deployment{}},
 		},
 		setupLog,
 		environment.developmentMode,
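
For reference, here is a minimal, self-contained sketch of the namespaced-cache
setup this patch introduces. It is an illustration, not part of the patch: it
assumes a controller-runtime release from the v0.11.x era (where
`cache.BuilderWithOptions`, `cache.Options.SelectorsByObject` and
`cache.ObjectSelector` are available), uses a plain `ctrl.NewManager` instead of
the project's `webhookwrapper.NewManager`, and hardcodes the `kubewarden`
namespace in place of the controller's `deploymentsNamespace` variable:

package main

import (
	"fmt"
	"os"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// Hypothetical stand-in for the controller's deploymentsNamespace variable.
	namespace := "kubewarden"

	// Every object type listed in SelectorsByObject below gets its Watch
	// restricted by this field selector; all other types keep the default,
	// cluster-wide Watch.
	namespaceSelector := cache.ObjectSelector{
		Field: fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace=%s", namespace)),
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		NewCache: cache.BuilderWithOptions(cache.Options{
			SelectorsByObject: map[client.Object]cache.ObjectSelector{
				&appsv1.ReplicaSet{}: namespaceSelector,
				&corev1.Secret{}:     namespaceSelector,
				&corev1.Pod{}:        namespaceSelector,
				&corev1.Service{}:    namespaceSelector,
			},
		}),
		// Never cache these types: reads go straight to the API server, so no
		// Watch (and no extra RBAC scope) is needed for them.
		ClientDisableCacheFor: []client.Object{&corev1.ConfigMap{}, &appsv1.Deployment{}},
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "unable to create manager:", err)
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		fmt.Fprintln(os.Stderr, "unable to start manager:", err)
		os.Exit(1)
	}
}

With this configuration, the Watches opened for Pods, Secrets, Services and
ReplicaSets are restricted by a `metadata.namespace=kubewarden` field selector,
matching the namespaced Role that the patch adds to role.yaml.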