diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml
index 4b6aa6a9c..e81a670ab 100644
--- a/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml
+++ b/manifests/klusterlet/managed/klusterlet-work-clusterrole-execution.yaml
@@ -4,6 +4,8 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: open-cluster-management:{{ .KlusterletName }}-work:execution
+  labels:
+    open-cluster-management.io/aggregate-to-work: "true"
 rules:
 # Allow agent to get/list/watch/create/delete crds.
 - apiGroups: ["apiextensions.k8s.io"]
diff --git a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml
index e14347152..e18f9b10a 100644
--- a/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml
+++ b/manifests/klusterlet/managed/klusterlet-work-clusterrolebinding-execution.yaml
@@ -7,7 +7,7 @@ metadata:
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: open-cluster-management:{{ .KlusterletName }}-work:execution
+  name: open-cluster-management:{{ .KlusterletName }}-work:aggregate
 subjects:
   - kind: ServiceAccount
     name: {{ .WorkServiceAccount }}
diff --git a/manifests/klusterlet/management/klusterlet-agent-deployment.yaml b/manifests/klusterlet/management/klusterlet-agent-deployment.yaml
index 42745244d..27ed50ec2 100644
--- a/manifests/klusterlet/management/klusterlet-agent-deployment.yaml
+++ b/manifests/klusterlet/management/klusterlet-agent-deployment.yaml
@@ -35,7 +35,7 @@ spec:
               - key: app
                 operator: In
                 values:
-                - klusterlet-registration-agent
+                - klusterlet-agent
           - weight: 30
             podAffinityTerm:
               topologyKey: kubernetes.io/hostname
@@ -44,8 +44,8 @@ spec:
               - key: app
                 operator: In
                 values:
-                - klusterlet-registration-agent
-      serviceAccountName: {{ .KlusterletName }}-agent-sa
+                - klusterlet-agent
+      serviceAccountName: {{ .KlusterletName }}-work-sa
       containers:
       - name: klusterlet-agent
         image: {{ .SingletonImage }}
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go
index a7dee3f39..92f9e549d 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go
@@ -49,9 +49,9 @@ func TestSyncDelete(t *testing.T) {
 		}
 	}
 
-	// 11 managed static manifests + 11 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments
-	if len(deleteActions) != 27 {
-		t.Errorf("Expected 27 delete actions, but got %d", len(deleteActions))
+	// 11 managed static manifests + 12 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments
+	if len(deleteActions) != 28 {
+		t.Errorf("Expected 28 delete actions, but got %d", len(deleteActions))
 	}
 
 	var updateWorkActions []clienttesting.PatchActionImpl
@@ -123,9 +123,9 @@ func TestSyncDeleteHosted(t *testing.T) {
 		}
 	}
 
-	// 11 static manifests + 2 namespaces
-	if len(deleteActionsManaged) != 13 {
-		t.Errorf("Expected 13 delete actions, but got %d", len(deleteActionsManaged))
+	// 12 static manifests + 2 namespaces
+	if len(deleteActionsManaged) != 14 {
+		t.Errorf("Expected 14 delete actions, but got %d", len(deleteActionsManaged))
 	}
 
 	var updateWorkActions []clienttesting.PatchActionImpl
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
index eb90932b3..ad1aa663d 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go
@@ -385,9 +385,11 @@ func ensureNamespace(ctx context.Context, kubeClient kubernetes.Interface, klust
 
 func serviceAccountName(suffix string, klusterlet *operatorapiv1.Klusterlet) string {
 	// in singleton mode, we only need one sa, so the name of work and registration sa are
-	// the same.
+	// the same. We need to use the name of the work sa for now, since the work sa permission can be
+	// escalated by creating manifestworks from other actors.
+	// TODO(qiujian16) revisit to see if we can use impersonation in the work agent.
 	if helpers.IsSingleton(klusterlet.Spec.DeployOption.Mode) {
-		return fmt.Sprintf("%s-agent-sa", klusterlet.Name)
+		return fmt.Sprintf("%s-work-sa", klusterlet.Name)
 	}
 	return fmt.Sprintf("%s-%s", klusterlet.Name, suffix)
 }
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go
index 7310b8674..6889d3a1e 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go
@@ -508,9 +508,9 @@ func TestSyncDeploy(t *testing.T) {
 	}
 
 	// Check if resources are created as expected
-	// 11 managed static manifests + 11 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
-	if len(createObjects) != 23 {
-		t.Errorf("Expect 23 objects created in the sync loop, actual %d", len(createObjects))
+	// 11 managed static manifests + 12 management static manifests - 2 duplicated service account manifests + 1 addon namespace + 2 deployments
+	if len(createObjects) != 24 {
+		t.Errorf("Expect 24 objects created in the sync loop, actual %d", len(createObjects))
 	}
 	for _, object := range createObjects {
 		ensureObject(t, object, klusterlet)
@@ -569,8 +569,8 @@ func TestSyncDeploySingleton(t *testing.T) {
 	}
 
 	// Check if resources are created as expected
-	// 10 managed static manifests + 10 management static manifests - 1 service account manifests + 1 addon namespace + 1 deployments
-	if len(createObjects) != 21 {
-		t.Errorf("Expect 21 objects created in the sync loop, actual %d", len(createObjects))
+	// 10 managed static manifests + 11 management static manifests - 1 service account manifests + 1 addon namespace + 1 deployments
+	if len(createObjects) != 22 {
+		t.Errorf("Expect 22 objects created in the sync loop, actual %d", len(createObjects))
 	}
 	for _, object := range createObjects {
@@ -658,9 +658,9 @@ func TestSyncDeployHosted(t *testing.T) {
 		}
 	}
 	// Check if resources are created as expected on the managed cluster
-	// 11 static manifests + 2 namespaces + 1 pull secret in the addon namespace
-	if len(createObjectsManaged) != 14 {
-		t.Errorf("Expect 14 objects created in the sync loop, actual %d", len(createObjectsManaged))
+	// 12 static manifests + 2 namespaces + 1 pull secret in the addon namespace
+	if len(createObjectsManaged) != 15 {
+		t.Errorf("Expect 15 objects created in the sync loop, actual %d", len(createObjectsManaged))
 	}
 	for _, object := range createObjectsManaged {
 		ensureObject(t, object, klusterlet)
@@ -1014,10 +1014,10 @@ func TestDeployOnKube111(t *testing.T) {
 	}
 
 	// Check if resources are created as expected
-	// 11 managed static manifests + 11 management static manifests -
+	// 12 managed static manifests + 11 management static manifests -
 	// 2 duplicated service account manifests + 1 addon namespace + 2 deployments + 2 kube111 clusterrolebindings
-	if len(createObjects) != 25 {
-		t.Errorf("Expect 25 objects created in the sync loop, actual %d", len(createObjects))
+	if len(createObjects) != 26 {
+		t.Errorf("Expect 26 objects created in the sync loop, actual %d", len(createObjects))
 	}
 	for _, object := range createObjects {
 		ensureObject(t, object, klusterlet)
@@ -1059,9 +1059,9 @@
 		}
 	}
 
-	// 11 managed static manifests + 11 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings
-	if len(deleteActions) != 29 {
-		t.Errorf("Expected 29 delete actions, but got %d", len(deleteActions))
+	// 12 managed static manifests + 11 management static manifests + 1 hub kubeconfig + 2 namespaces + 2 deployments + 2 kube111 clusterrolebindings
+	if len(deleteActions) != 30 {
+		t.Errorf("Expected 30 delete actions, but got %d", len(deleteActions))
 	}
 }
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go
index a2e332130..1e0deab31 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go
@@ -11,6 +11,7 @@ import (
 	"github.com/openshift/library-go/pkg/assets"
 	"github.com/openshift/library-go/pkg/operator/events"
 	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -119,6 +120,11 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
 		}
 	}
 
+	// add aggregation clusterrole for work; this is not allowed in library-go for now, so it needs an additional create here
+	if err := r.createAggregationRule(ctx, klusterlet); err != nil {
+		errs = append(errs, err)
+	}
+
 	if len(errs) > 0 {
 		applyErrors := utilerrors.NewAggregate(errs)
 		meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{
@@ -131,6 +137,32 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap
 	return klusterlet, reconcileContinue, nil
 }
 
+func (r *managedReconcile) createAggregationRule(ctx context.Context, klusterlet *operatorapiv1.Klusterlet) error {
+	aggregateClusterRoleName := fmt.Sprintf("open-cluster-management:%s-work:aggregate", klusterlet.Name)
+	_, err := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Get(ctx, aggregateClusterRoleName, metav1.GetOptions{})
+	if errors.IsNotFound(err) {
+		aggregateClusterRole := &rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: aggregateClusterRoleName,
+			},
+			AggregationRule: &rbacv1.AggregationRule{
+				ClusterRoleSelectors: []metav1.LabelSelector{
+					{
+						MatchLabels: map[string]string{
+							"open-cluster-management.io/aggregate-to-work": "true",
+						},
+					},
+				},
+			},
+			Rules: []rbacv1.PolicyRule{},
+		}
+		_, createErr := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Create(ctx, aggregateClusterRole, metav1.CreateOptions{})
+		return createErr
+	}
+
+	return nil
+}
+
 func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.Klusterlet, config klusterletConfig) (*operatorapiv1.Klusterlet, reconcileState, error) {
 	// nothing should be done when deploy mode is hosted and hosted finalizer is not added.
@@ -155,6 +187,13 @@ func (r *managedReconcile) clean(ctx context.Context, klusterlet *operatorapiv1.
 		}
 	}
 
+	// remove aggregate work clusterrole
+	aggregateClusterRoleName := fmt.Sprintf("open-cluster-management:%s-work:aggregate", klusterlet.Name)
+	if err := r.managedClusterClients.kubeClient.RbacV1().ClusterRoles().Delete(
+		ctx, aggregateClusterRoleName, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
+		return klusterlet, reconcileStop, err
+	}
+
 	// remove the klusterlet namespace and klusterlet addon namespace on the managed cluster
 	// For now, whether in Default or Hosted mode, the addons could be deployed on the managed cluster.
 	namespaces := []string{config.KlusterletNamespace, fmt.Sprintf("%s-addon", config.KlusterletNamespace)}
diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go
index 05b694a62..c74c817d9 100644
--- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go
+++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go
@@ -159,7 +159,7 @@ func (r *runtimeReconcile) installSingletonAgent(ctx context.Context, klusterlet
 	// Create managed config secret for agent. In singletonHosted mode, service account for registration/work is actually
 	// the same one, and we just pick one of them to build the external kubeconfig.
 	if err := r.createManagedClusterKubeconfig(ctx, klusterlet, config.KlusterletNamespace, config.AgentNamespace,
-		config.RegistrationServiceAccount, config.ExternalManagedKubeConfigAgentSecret,
+		config.WorkServiceAccount, config.ExternalManagedKubeConfigAgentSecret,
 		r.recorder); err != nil {
 		return klusterlet, reconcileStop, err
 	}
diff --git a/test/e2e/work_workload_test.go b/test/e2e/work_workload_test.go
index af87a6d92..20c7c3ab7 100644
--- a/test/e2e/work_workload_test.go
+++ b/test/e2e/work_workload_test.go
@@ -989,7 +989,7 @@ func newAggregatedClusterRole(name, apiGroup, resource string) *rbacv1.ClusterRo
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 			Labels: map[string]string{
-				"rbac.authorization.k8s.io/aggregate-to-admin": "true",
+				"open-cluster-management.io/aggregate-to-work": "true",
 			},
 		},
 		Rules: []rbacv1.PolicyRule{
diff --git a/test/integration/operator/klusterlet_singleton_test.go b/test/integration/operator/klusterlet_singleton_test.go
index 330779842..674a42fbf 100644
--- a/test/integration/operator/klusterlet_singleton_test.go
+++ b/test/integration/operator/klusterlet_singleton_test.go
@@ -77,7 +77,7 @@ var _ = ginkgo.Describe("Klusterlet Singleton mode", func() {
 		workManagementRoleName = fmt.Sprintf("open-cluster-management:management:%s-work:agent", klusterlet.Name)
 		registrationManagedRoleName = fmt.Sprintf("open-cluster-management:%s-registration:agent", klusterlet.Name)
 		workManagedRoleName = fmt.Sprintf("open-cluster-management:%s-work:agent", klusterlet.Name)
-		saName = fmt.Sprintf("%s-agent-sa", klusterlet.Name)
+		saName = fmt.Sprintf("%s-work-sa", klusterlet.Name)
 	})
 
 	ginkgo.AfterEach(func() {
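
Note: with this change, extra permissions can be granted to the work agent on a managed cluster by labeling a ClusterRole with open-cluster-management.io/aggregate-to-work: "true"; the Kubernetes clusterrole-aggregation controller then folds its rules into open-cluster-management:<klusterlet-name>-work:aggregate, which the work service account is bound to. A minimal sketch of such a ClusterRole (the name my-extra-work-permissions and the example rules are illustrative assumptions, not part of this patch):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # hypothetical name, for illustration only
  name: my-extra-work-permissions
  labels:
    # matched by the aggregationRule created by createAggregationRule above
    open-cluster-management.io/aggregate-to-work: "true"
rules:
# hypothetical API group/resource; use whatever the resources in your ManifestWorks require
- apiGroups: ["example.com"]
  resources: ["widgets"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]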