diff --git a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml index 9fe6669dc..501ee5e6a 100644 --- a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml +++ b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml @@ -59,7 +59,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-07-14T00:10:06Z" + createdAt: "2024-09-30T01:52:57Z" description: Manages the installation and upgrade of the ClusterManager. operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/deploy/klusterlet/chart/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/deploy/klusterlet/chart/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index c1cbc503a..d5e0e0ef7 100644 --- a/deploy/klusterlet/chart/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/deploy/klusterlet/chart/klusterlet/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -289,6 +289,39 @@ spec: If it is set empty, use the default value: 50 format: int32 type: integer + registrationDriver: + description: This provides driver details required to register + with hub + properties: + authType: + default: csr + description: Type of the authentication used by managedcluster + to register as well as pull work from hub. Possible values + are csr and awsirsa. + enum: + - csr + - awsirsa + type: string + awsIrsa: + description: |- + Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. 
+ This is required only when the authType is awsirsa. + properties: + hubClusterArn: + description: |- + The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. + Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1. + minLength: 1 + type: string + managedClusterArn: + description: |- + The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub + as well as used by klusterlet-agent, to assume role suffixed with the md5hash, on startup. + Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1. + minLength: 1 + type: string + type: object + type: object type: object registrationImagePullSpec: description: |- diff --git a/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index c1cbc503a..d5e0e0ef7 100644 --- a/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -289,6 +289,39 @@ spec: If it is set empty, use the default value: 50 format: int32 type: integer + registrationDriver: + description: This provides driver details required to register + with hub + properties: + authType: + default: csr + description: Type of the authentication used by managedcluster + to register as well as pull work from hub. Possible values + are csr and awsirsa. + enum: + - csr + - awsirsa + type: string + awsIrsa: + description: |- + Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. + This is required only when the authType is awsirsa. 
+ properties: + hubClusterArn: + description: |- + The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. + Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1. + minLength: 1 + type: string + managedClusterArn: + description: |- + The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub + as well as used by klusterlet-agent, to assume role suffixed with the md5hash, on startup. + Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1. + minLength: 1 + type: string + type: object + type: object type: object registrationImagePullSpec: description: |- diff --git a/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml b/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml index e87733a40..b37e082f2 100644 --- a/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml +++ b/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml @@ -31,7 +31,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-07-14T00:10:07Z" + createdAt: "2024-09-30T01:52:57Z" description: Manages the installation and upgrade of the Klusterlet. 
operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml b/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml index 5e688225e..7bbe93d6a 100644 --- a/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml +++ b/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml @@ -289,6 +289,39 @@ spec: If it is set empty, use the default value: 50 format: int32 type: integer + registrationDriver: + description: This provides driver details required to register + with hub + properties: + authType: + default: csr + description: Type of the authentication used by managedcluster + to register as well as pull work from hub. Possible values + are csr and awsirsa. + enum: + - csr + - awsirsa + type: string + awsIrsa: + description: |- + Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. + This is required only when the authType is awsirsa. + properties: + hubClusterArn: + description: |- + The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. + Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1. + minLength: 1 + type: string + managedClusterArn: + description: |- + The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub + as well as used by klusterlet-agent, to assume role suffixed with the md5hash, on startup. + Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1. 
+ minLength: 1 + type: string + type: object + type: object type: object registrationImagePullSpec: description: |- diff --git a/go.mod b/go.mod index 870030667..0dd30f0d7 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( k8s.io/kube-aggregator v0.30.3 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a - open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c + open-cluster-management.io/api v0.14.1-0.20240929023505-ab092a65ab63 open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866 sigs.k8s.io/controller-runtime v0.18.5 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 diff --git a/go.sum b/go.sum index d2eb6ae50..a6174fd9b 100644 --- a/go.sum +++ b/go.sum @@ -441,8 +441,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a h1:La1cYE3xkPFS2OJnsPQbkkahKE7yabuPcIISRfb4qsg= open-cluster-management.io/addon-framework v0.10.1-0.20240703130731-ba7fd000a03a/go.mod h1:C1VETu/CIQKYfMiVAgNzPEUHjCpL9P1Z/KsGhHa4kl4= -open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c h1:gYfgkX/U6fv2d3Ly8D6N1GM9zokORupLSgCxx791zZw= -open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM= +open-cluster-management.io/api v0.14.1-0.20240929023505-ab092a65ab63 h1:UV1OCtyt0EH/mgsdsvyxOPg9xva5pHjhGgAa0+gLpUM= +open-cluster-management.io/api v0.14.1-0.20240929023505-ab092a65ab63/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM= open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866 h1:nxYrSsYwl9Mq8DuaJ0K98PCpuGsai+AvXbggMfZDCGI= open-cluster-management.io/sdk-go v0.14.1-0.20240918072645-225dcf1b6866/go.mod 
h1:jCyXPY900UK1n4xwUBWSz27s7lcXN/fhIDF6xu3jIHw= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= diff --git a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go index 25ba2c2b4..cc8bbfc2b 100644 --- a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go +++ b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler.go @@ -3,7 +3,10 @@ package addonconfiguration import ( "context" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" @@ -19,13 +22,48 @@ type managedClusterAddonConfigurationReconciler struct { func (d *managedClusterAddonConfigurationReconciler) reconcile( ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { var errs []error + configured := sets.Set[string]{} + // Update mca config references and configured condition to true for addons to rollout. 
for _, addon := range graph.getAddonsToUpdate() { - mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs) - patcher := patcher.NewPatcher[ - *addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus]( - d.addonClient.AddonV1alpha1().ManagedClusterAddOns(mca.Namespace)) - _, err := patcher.PatchStatus(ctx, mca, mca.Status, addon.mca.Status) + // update mca config references in status + newAddon := d.mergeAddonConfig(addon.mca, addon.desiredConfigs) + // update mca configured condition to true + d.setCondition(newAddon, metav1.ConditionTrue, "ConfigurationsConfigured", "Configurations configured") + + err := d.patchAddonStatus(ctx, newAddon, addon.mca) + if err != nil { + errs = append(errs, err) + } + + configured.Insert(addon.mca.Namespace) + } + + // Update mca configured condition to false for addons with rollout status ToApply. + for _, addon := range graph.getAddonsToApply() { + if configured.Has(addon.mca.Namespace) { + continue + } + newAddon := addon.mca.DeepCopy() + d.setCondition(newAddon, metav1.ConditionFalse, "ConfigurationsNotConfigured", "Configurations updated and not configured yet") + + err := d.patchAddonStatus(ctx, newAddon, addon.mca) + if err != nil { + errs = append(errs, err) + } + } + + // Update mca configured condition to true for addons with rollout status Succeeded. + // Including: + // 1. Addons without configurations, the rollout status is set to success in func setRolloutStatus(), + // should set condition to true. + // 2. Addons with configurations and already rollout successfully. In upgrade scenario, when the + // addon configurations do not change while addon components upgrade, should set condition to true. 
+ for _, addon := range graph.getAddonsSucceeded() { + newAddon := addon.mca.DeepCopy() + d.setCondition(newAddon, metav1.ConditionTrue, "ConfigurationsConfigured", "Configurations configured") + + err := d.patchAddonStatus(ctx, newAddon, addon.mca) if err != nil { errs = append(errs, err) } @@ -88,3 +126,25 @@ func (d *managedClusterAddonConfigurationReconciler) mergeAddonConfig( mcaCopy.Status.ConfigReferences = configRefs return mcaCopy } + +// setCondition updates the configured condition for the addon +func (d *managedClusterAddonConfigurationReconciler) setCondition( + addon *addonv1alpha1.ManagedClusterAddOn, status metav1.ConditionStatus, reason, message string) { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: status, + Reason: reason, + Message: message, + }) +} + +// patchAddonStatus patches the status of the addon +func (d *managedClusterAddonConfigurationReconciler) patchAddonStatus( + ctx context.Context, newaddon *addonv1alpha1.ManagedClusterAddOn, oldaddon *addonv1alpha1.ManagedClusterAddOn) error { + patcher := patcher.NewPatcher[ + *addonv1alpha1.ManagedClusterAddOn, addonv1alpha1.ManagedClusterAddOnSpec, addonv1alpha1.ManagedClusterAddOnStatus]( + d.addonClient.AddonV1alpha1().ManagedClusterAddOns(newaddon.Namespace)) + + _, err := patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) + return err +} diff --git a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go index 05d71d440..d075e72a3 100644 --- a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go +++ b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go @@ -8,6 +8,7 @@ import ( "time" apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -48,7 +49,12 @@ func TestAddonConfigReconcile(t *testing.T) { clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").Build(), placements: []runtime.Object{}, placementDecisions: []runtime.Object{}, - validateAddonActions: addontesting.AssertNoActions, + validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { + addontesting.AssertActions(t, actions, "patch", "patch") + sort.Sort(byPatchName(actions)) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + }, }, { name: "manual installStrategy", @@ -80,6 +86,17 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ + ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test"}, + SpecHash: "hash", + }, + LastObservedGeneration: 0, + }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) }, }, { @@ -151,6 +168,8 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) }, }, { @@ -275,6 +294,8 @@ func TestAddonConfigReconcile(t *testing.T) { LastObservedGeneration: 0, }, }) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) }, }, { @@ -397,6 +418,8 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + 
expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) }, }, { @@ -477,6 +500,8 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) }, }, { @@ -543,6 +568,7 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) }, }, { @@ -616,6 +642,7 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 1, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) }, }, { @@ -629,7 +656,12 @@ func TestAddonConfigReconcile(t *testing.T) { SpecHash: "hash1", }, LastObservedGeneration: 1, - }}, nil), + }}, []metav1.Condition{{ + Type: addonv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }}), }, placements: []runtime.Object{ &clusterv1beta1.Placement{ObjectMeta: metav1.ObjectMeta{Name: "test-placement", Namespace: "default"}}, @@ -716,9 +748,12 @@ func TestAddonConfigReconcile(t *testing.T) { }, }).Build(), validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { - addontesting.AssertActions(t, actions, "patch") + addontesting.AssertActions(t, actions, "patch", "patch", "patch") sort.Sort(byPatchName(actions)) - expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + // cluster1 is not in installstrategy and has no config + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + // cluster2 is in installstrategy and is the first to rollout + expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, DesiredConfig: &addonv1alpha1.ConfigSpecHash{ @@ -727,6 
+762,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + // cluster3 is in installstrategy and is not rollout + expectPatchConditionAction(t, actions[2], metav1.ConditionFalse) }, }, { @@ -773,9 +811,12 @@ func TestAddonConfigReconcile(t *testing.T) { }, }).Build(), validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { - addontesting.AssertActions(t, actions, "patch") + addontesting.AssertActions(t, actions, "patch", "patch", "patch") sort.Sort(byPatchName(actions)) - expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + // cluster1 is not in installstrategy and has no config + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + // cluster2 is in installstrategy and is the first to rollout + expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, DesiredConfig: &addonv1alpha1.ConfigSpecHash{ @@ -784,6 +825,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + // cluster3 is in installstrategy and is not rollout + expectPatchConditionAction(t, actions[2], metav1.ConditionFalse) }, }, { @@ -827,9 +871,12 @@ func TestAddonConfigReconcile(t *testing.T) { }, }).Build(), validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { - addontesting.AssertActions(t, actions, "patch", "patch") + addontesting.AssertActions(t, actions, "patch", "patch", "patch") sort.Sort(byPatchName(actions)) - expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ + // cluster1 is not in installstrategy and has no config + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + // cluster2 is in installstrategy and 
rollout + expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, DesiredConfig: &addonv1alpha1.ConfigSpecHash{ @@ -838,7 +885,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) - expectPatchConfigurationAction(t, actions[1], []addonv1alpha1.ConfigReference{{ + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + // cluster2 is in installstrategy and rollout + expectPatchConfigurationAction(t, actions[2], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, DesiredConfig: &addonv1alpha1.ConfigSpecHash{ @@ -847,6 +896,7 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[2], metav1.ConditionTrue) }, }, { @@ -913,8 +963,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, }).Build(), validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { - addontesting.AssertActions(t, actions, "patch", "patch") + addontesting.AssertActions(t, actions, "patch", "patch", "patch") sort.Sort(byPatchName(actions)) + // cluster1 and cluster2 are rollout expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, @@ -933,6 +984,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[2], metav1.ConditionFalse) }, }, { @@ -991,8 +1045,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, 
}).Build(), validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { - addontesting.AssertActions(t, actions, "patch", "patch") + addontesting.AssertActions(t, actions, "patch", "patch", "patch") sort.Sort(byPatchName(actions)) + // cluster1 and cluster2 are rollout expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, @@ -1011,6 +1066,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[2], metav1.ConditionFalse) }, }, { @@ -1077,8 +1135,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, }).Build(), validateAddonActions: func(t *testing.T, actions []clienttesting.Action) { - addontesting.AssertActions(t, actions, "patch", "patch") + addontesting.AssertActions(t, actions, "patch", "patch", "patch") sort.Sort(byPatchName(actions)) + // cluster1 and cluster2 are rollout expectPatchConfigurationAction(t, actions[0], []addonv1alpha1.ConfigReference{{ ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: addonv1alpha1.ConfigReferent{Name: "test1"}, @@ -1097,6 +1156,9 @@ func TestAddonConfigReconcile(t *testing.T) { }, LastObservedGeneration: 0, }}) + expectPatchConditionAction(t, actions[0], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[1], metav1.ConditionTrue) + expectPatchConditionAction(t, actions[2], metav1.ConditionFalse) }, }, } @@ -1207,3 +1269,17 @@ func expectPatchConfigurationAction(t *testing.T, action clienttesting.Action, e t.Errorf("Configuration not correctly patched, expected %v, actual %v", expected, mca.Status.ConfigReferences) } } + +func expectPatchConditionAction(t *testing.T, action 
clienttesting.Action, expected metav1.ConditionStatus) { + patch := action.(clienttesting.PatchActionImpl).GetPatch() + mca := &addonv1alpha1.ManagedClusterAddOn{} + err := json.Unmarshal(patch, mca) + if err != nil { + t.Fatal(err) + } + + actualCond := meta.FindStatusCondition(mca.Status.Conditions, addonv1alpha1.ManagedClusterAddOnConditionConfigured) + if actualCond == nil || actualCond.Status != expected { + t.Errorf("Condition not correctly patched, expected %v, actual %v", expected, mca.Status.Conditions) + } +} diff --git a/pkg/addon/controllers/addonconfiguration/graph.go b/pkg/addon/controllers/addonconfiguration/graph.go index 561a7f069..52275ad2f 100644 --- a/pkg/addon/controllers/addonconfiguration/graph.go +++ b/pkg/addon/controllers/addonconfiguration/graph.go @@ -312,6 +312,28 @@ func (g *configurationGraph) getAddonsToUpdate() []*addonNode { return addons } +func (g *configurationGraph) getAddonsToApply() []*addonNode { + var addons []*addonNode + for _, node := range g.nodes { + addons = append(addons, node.getAddonsToApply()...) + } + + addons = append(addons, g.defaults.getAddonsToApply()...) + + return addons +} + +func (g *configurationGraph) getAddonsSucceeded() []*addonNode { + var addons []*addonNode + for _, node := range g.nodes { + addons = append(addons, node.getAddonsSucceeded()...) + } + + addons = append(addons, g.defaults.getAddonsSucceeded()...) 
+ + return addons +} + func (g *configurationGraph) getRequeueTime() time.Duration { minRequeue := maxRequeueTime @@ -422,6 +444,30 @@ func (n *installStrategyNode) getAddonsToUpdate() []*addonNode { return addons } +// getAddonsToApply return the addons to sync configurations +func (n *installStrategyNode) getAddonsToApply() []*addonNode { + var addons []*addonNode + + for i, addon := range n.children { + if addon.status.Status == clustersdkv1alpha1.ToApply { + addons = append(addons, n.children[i]) + } + } + return addons +} + +// getAddonsSucceeded return the addons already rollout successfully or has no configurations +func (n *installStrategyNode) getAddonsSucceeded() []*addonNode { + var addons []*addonNode + + for i, addon := range n.children { + if addon.status.Status == clustersdkv1alpha1.Succeeded { + addons = append(addons, n.children[i]) + } + } + return addons +} + // Return the number of succeed addons. // Including the addons with status Succeed after MinSuccessTime. func (n *installStrategyNode) countAddonUpgradeSucceed() int { diff --git a/pkg/addon/templateagent/template_agent.go b/pkg/addon/templateagent/template_agent.go index a5cb28d9b..2aa1a5c95 100644 --- a/pkg/addon/templateagent/template_agent.go +++ b/pkg/addon/templateagent/template_agent.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/valyala/fasttemplate" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -96,6 +97,11 @@ func (a *CRDTemplateAgentAddon) Manifests( cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionConfigured) { + klog.InfoS("Addon configured condition is not set in status", "addonName", addon.Name) + return nil, fmt.Errorf("addon %s configured condition is not set in status", addon.Name) + } + 
template, err := a.getDesiredAddOnTemplateInner(addon.Name, addon.Status.ConfigReferences) if err != nil { return nil, err diff --git a/pkg/addon/templateagent/template_agent_test.go b/pkg/addon/templateagent/template_agent_test.go index bea479ae8..31e3e6826 100644 --- a/pkg/addon/templateagent/template_agent_test.go +++ b/pkg/addon/templateagent/template_agent_test.go @@ -136,6 +136,7 @@ func TestAddonTemplateAgentManifests(t *testing.T) { name string addonTemplatePath string addonDeploymentConfig *addonapiv1alpha1.AddOnDeploymentConfig + addonConditions []metav1.Condition managedCluster *clusterv1.ManagedCluster expectedErr string validateObjects func(t *testing.T, objects []runtime.Object) @@ -161,6 +162,51 @@ func TestAddonTemplateAgentManifests(t *testing.T) { managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"), expectedErr: fmt.Sprintf("addon %s/%s template not found in status", clusterName, addonName), }, + { + name: "no configured condition", + addonTemplatePath: "./testmanifests/addontemplate.yaml", + addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello-config", + Namespace: "default", + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{ + { + Name: "LOG_LEVEL", + Value: "4", + }, + }, + }, + }, + addonConditions: []metav1.Condition{}, + managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"), + expectedErr: fmt.Sprintf("addon %s configured condition is not set in status", addonName), + }, + { + name: "false configured condition", + addonTemplatePath: "./testmanifests/addontemplate.yaml", + addonDeploymentConfig: &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello-config", + Namespace: "default", + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{ + { + Name: "LOG_LEVEL", + Value: "4", 
+ }, + }, + }, + }, + addonConditions: []metav1.Condition{{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionFalse, + }}, + managedCluster: addonfactory.NewFakeManagedCluster(clusterName, "1.10.1"), + expectedErr: fmt.Sprintf("addon %s configured condition is not set in status", addonName), + }, { name: "manifests rendered successfully", addonTemplatePath: "./testmanifests/addontemplate.yaml", @@ -486,7 +532,15 @@ func TestAddonTemplateAgentManifests(t *testing.T) { managedClusterAddonBuilder.withAddonDeploymentConfig(tc.addonDeploymentConfig) objs = append(objs, tc.addonDeploymentConfig) } - managedClusterAddon = managedClusterAddonBuilder.build() + + if tc.addonConditions != nil { + managedClusterAddon = managedClusterAddonBuilder.withSetStatusConditions(tc.addonConditions).build() + } else { + managedClusterAddon = managedClusterAddonBuilder.withSetStatusConditions([]metav1.Condition{{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + }}).build() + } objs = append(objs, managedClusterAddon) hubKubeClient := fakekube.NewSimpleClientset() @@ -931,6 +985,7 @@ type testManagedClusterAddOnBuilder struct { addonTemplate *addonapiv1alpha1.AddOnTemplate addonDeploymentConfig *addonapiv1alpha1.AddOnDeploymentConfig setStatusConfigReferences bool + conditions []metav1.Condition } func newManagedClusterAddonBuilder(mca *addonapiv1alpha1.ManagedClusterAddOn) *testManagedClusterAddOnBuilder { @@ -957,6 +1012,11 @@ func (b *testManagedClusterAddOnBuilder) withSetStatusConfigReferences(set bool) return b } +func (b *testManagedClusterAddOnBuilder) withSetStatusConditions(conditions []metav1.Condition) *testManagedClusterAddOnBuilder { + b.conditions = conditions + return b +} + func (b *testManagedClusterAddOnBuilder) build() *addonapiv1alpha1.ManagedClusterAddOn { if b.addonDeploymentConfig != nil { hash, _ := utils.GetAddOnDeploymentConfigSpecHash(b.addonDeploymentConfig) @@ 
-987,6 +1047,7 @@ func (b *testManagedClusterAddOnBuilder) build() *addonapiv1alpha1.ManagedCluste ConfigGroupResource: configReference.ConfigGroupResource, ConfigReferent: configReference.ConfigReferent, }) + b.managedClusterAddOn.Status.Conditions = b.conditions } if b.addonTemplate != nil { @@ -1015,6 +1076,7 @@ func (b *testManagedClusterAddOnBuilder) build() *addonapiv1alpha1.ManagedCluste ConfigGroupResource: configReference.ConfigGroupResource, ConfigReferent: configReference.ConfigReferent, }) + b.managedClusterAddOn.Status.Conditions = b.conditions } return b.managedClusterAddOn } diff --git a/test/integration/addon/addon_manager_upgrade_test.go b/test/integration/addon/addon_manager_upgrade_test.go index 17d9ff8e4..5c86cb997 100644 --- a/test/integration/addon/addon_manager_upgrade_test.go +++ b/test/integration/addon/addon_manager_upgrade_test.go @@ -234,6 +234,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } ginkgo.By("check cma status") @@ -313,6 +319,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } ginkgo.By("check cma status") @@ -403,6 +415,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonProgressing, Message: "progressing... 
work is not ready", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } for i := 2; i < 4; i++ { assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ @@ -436,6 +454,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionFalse, + Reason: "ConfigurationsNotConfigured", + Message: "Configurations updated and not configured yet", + }) } ginkgo.By("check cma status") @@ -519,6 +543,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } ginkgo.By("check cma status") @@ -567,6 +597,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } ginkgo.By("check cma status") assertClusterManagementAddOnInstallProgression(testAddOnConfigsImpl.name, 
addonapiv1alpha1.InstallProgression{ @@ -664,6 +700,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonProgressing, Message: "progressing... work is not ready", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } for i := 2; i < 4; i++ { assertManagedClusterAddOnConfigReferences(testAddOnConfigsImpl.name, clusterNames[i], addonapiv1alpha1.ConfigReference{ @@ -697,6 +739,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionFalse, + Reason: "ConfigurationsNotConfigured", + Message: "Configurations updated and not configured yet", + }) } ginkgo.By("check cma status") @@ -793,6 +841,12 @@ var _ = ginkgo.Describe("Addon upgrade", func() { Reason: addonapiv1alpha1.ProgressingReasonCompleted, Message: "completed with no errors.", }) + assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured, + Status: metav1.ConditionTrue, + Reason: "ConfigurationsConfigured", + Message: "Configurations configured", + }) } ginkgo.By("check cma status") diff --git a/test/integration/addon/suite_test.go b/test/integration/addon/suite_test.go index efaaf7edd..feca49dc3 100644 --- a/test/integration/addon/suite_test.go +++ b/test/integration/addon/suite_test.go @@ -2,6 +2,7 @@ package integration import ( "context" + "fmt" "path/filepath" "testing" @@ -9,6 +10,7 @@ import ( "github.com/onsi/gomega" 
"github.com/openshift/library-go/pkg/controller/controllercmd" certificatesv1 "k8s.io/api/certificates/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -138,6 +140,9 @@ type testAddon struct { } func (t *testAddon) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionConfigured) { + return nil, fmt.Errorf("addon %s configured condition is not set in status", addon.Name) + } return t.manifests[cluster.Name], nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 0422521a1..da32ebdb2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1528,7 +1528,7 @@ open-cluster-management.io/addon-framework/pkg/agent open-cluster-management.io/addon-framework/pkg/assets open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.14.1-0.20240627145512-bd6f2229b53c +# open-cluster-management.io/api v0.14.1-0.20240929023505-ab092a65ab63 ## explicit; go 1.22.0 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go index 82ad206c5..1ed2134bc 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go @@ -318,7 +318,6 @@ const ( // the managed cluster. 
ManagedClusterAddOnConditionDegraded string = "Degraded" - // Deprecated: Use ManagedClusterAddOnConditionProgressing instead // ManagedClusterAddOnConditionConfigured represents that the addon agent is configured with its configuration ManagedClusterAddOnConditionConfigured string = "Configured" diff --git a/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml index 576ca94ff..c211a638d 100644 --- a/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -185,6 +185,29 @@ spec: type: integer format: int32 default: 50 + registrationDriver: + description: This provides driver details required to register with hub + type: object + properties: + authType: + description: Type of the authentication used by managedcluster to register as well as pull work from hub. Possible values are csr and awsirsa. + type: string + default: csr + enum: + - csr + - awsirsa + awsIrsa: + description: 'Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. This is required only when the authType is awsirsa.' + type: object + properties: + hubClusterArn: + description: 'The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.' + type: string + minLength: 1 + managedClusterArn: + description: 'The arn of the managed cluster (ie: an EKS cluster). 
This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.' + type: string + minLength: 1 registrationImagePullSpec: description: RegistrationImagePullSpec represents the desired image configuration of registration agent. quay.io/open-cluster-management.io/registration:latest will be used if unspecified. type: string diff --git a/vendor/open-cluster-management.io/api/feature/feature.go b/vendor/open-cluster-management.io/api/feature/feature.go index 9b3956d85..a6b151961 100644 --- a/vendor/open-cluster-management.io/api/feature/feature.go +++ b/vendor/open-cluster-management.io/api/feature/feature.go @@ -77,6 +77,9 @@ const ( // MultipleHubs allows user to configure multiple bootstrapkubeconfig connecting to different hubs via Klusterlet and let agent decide which one to use MultipleHubs featuregate.Feature = "MultipleHubs" + + // ClusterProfile will start new controller in the Hub that can be used to sync ManagedCluster to ClusterProfile. 
+ ClusterProfile featuregate.Feature = "ClusterProfile" ) // DefaultSpokeRegistrationFeatureGates consists of all known ocm-registration @@ -97,6 +100,7 @@ var DefaultHubRegistrationFeatureGates = map[featuregate.Feature]featuregate.Fea V1beta1CSRAPICompatibility: {Default: false, PreRelease: featuregate.Alpha}, ManagedClusterAutoApproval: {Default: false, PreRelease: featuregate.Alpha}, ResourceCleanup: {Default: false, PreRelease: featuregate.Alpha}, + ClusterProfile: {Default: false, PreRelease: featuregate.Alpha}, } var DefaultHubAddonManagerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index c1cbc503a..d5e0e0ef7 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -289,6 +289,39 @@ spec: If it is set empty, use the default value: 50 format: int32 type: integer + registrationDriver: + description: This provides driver details required to register + with hub + properties: + authType: + default: csr + description: Type of the authentication used by managedcluster + to register as well as pull work from hub. Possible values + are csr and awsirsa. + enum: + - csr + - awsirsa + type: string + awsIrsa: + description: |- + Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. + This is required only when the authType is awsirsa. + properties: + hubClusterArn: + description: |- + The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. 
+ Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1. + minLength: 1 + type: string + managedClusterArn: + description: |- + The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub + as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. + Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1. + minLength: 1 + type: string + type: object + type: object type: object registrationImagePullSpec: description: |- diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go index 395340504..8e89b4bdb 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go +++ b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go @@ -172,6 +172,35 @@ type RegistrationConfiguration struct { // But if the user updates the content of a failed bootstrapkubeconfig, the "failed" mark will be cleared. // +optional BootstrapKubeConfigs BootstrapKubeConfigs `json:"bootstrapKubeConfigs,omitempty"` + + // This provides driver details required to register with hub + // +optional + RegistrationDriver RegistrationDriver `json:"registrationDriver,omitempty"` +} + +type RegistrationDriver struct { + // Type of the authentication used by managedcluster to register as well as pull work from hub. Possible values are csr and awsirsa. + // +kubebuilder:default:=csr + // +kubebuilder:validation:Enum=csr;awsirsa + AuthType string `json:"authType"` + + // Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. + // This is required only when the authType is awsirsa. + AwsIrsa *AwsIrsa `json:"awsIrsa,omitempty"` +} + +type AwsIrsa struct { + // The arn of the hub cluster (ie: an EKS cluster). 
This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. + // Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1. + // +required + // +kubebuilder:validation:MinLength=1 + HubClusterArn string `json:"hubClusterArn"` + // The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub + // as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. + // Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1. + // +required + // +kubebuilder:validation:MinLength=1 + ManagedClusterArn string `json:"managedClusterArn"` } type TypeBootstrapKubeConfigs string diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go index c4df39e13..64a618ba5 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go @@ -32,6 +32,22 @@ func (in *AddOnManagerConfiguration) DeepCopy() *AddOnManagerConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AwsIrsa) DeepCopyInto(out *AwsIrsa) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsIrsa. +func (in *AwsIrsa) DeepCopy() *AwsIrsa { + if in == nil { + return nil + } + out := new(AwsIrsa) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BootstrapKubeConfigs) DeepCopyInto(out *BootstrapKubeConfigs) { *out = *in @@ -506,6 +522,7 @@ func (in *RegistrationConfiguration) DeepCopyInto(out *RegistrationConfiguration } } in.BootstrapKubeConfigs.DeepCopyInto(&out.BootstrapKubeConfigs) + in.RegistrationDriver.DeepCopyInto(&out.RegistrationDriver) return } @@ -519,6 +536,27 @@ func (in *RegistrationConfiguration) DeepCopy() *RegistrationConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrationDriver) DeepCopyInto(out *RegistrationDriver) { + *out = *in + if in.AwsIrsa != nil { + in, out := &in.AwsIrsa, &out.AwsIrsa + *out = new(AwsIrsa) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationDriver. +func (in *RegistrationDriver) DeepCopy() *RegistrationDriver { + if in == nil { + return nil + } + out := new(RegistrationDriver) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistrationHubConfiguration) DeepCopyInto(out *RegistrationHubConfiguration) { *out = *in diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go index b01728bc7..5d63b9197 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -163,6 +163,15 @@ func (WorkConfiguration) SwaggerDoc() map[string]string { return map_WorkConfiguration } +var map_AwsIrsa = map[string]string{ + "hubClusterArn": "The arn of the hub cluster (ie: an EKS cluster). This will be required to pass information to hub, which hub will use to create IAM identities for this klusterlet. 
Example - arn:eks:us-west-2:12345678910:cluster/hub-cluster1.", + "managedClusterArn": "The arn of the managed cluster (ie: an EKS cluster). This will be required to generate the md5hash which will be used as a suffix to create IAM role on hub as well as used by kluslerlet-agent, to assume role suffixed with the md5hash, on startup. Example - arn:eks:us-west-2:12345678910:cluster/managed-cluster1.", +} + +func (AwsIrsa) SwaggerDoc() map[string]string { + return map_AwsIrsa +} + var map_BootstrapKubeConfigs = map[string]string{ "type": "Type specifies the type of priority bootstrap kubeconfigs. By default, it is set to None, representing no priority bootstrap kubeconfigs are set.", "localSecretsConfig": "LocalSecretsConfig include a list of secrets that contains the kubeconfigs for ordered bootstrap kubeconifigs. The secrets must be in the same namespace where the agent controller runs.", @@ -268,12 +277,22 @@ var map_RegistrationConfiguration = map[string]string{ "kubeAPIQPS": "KubeAPIQPS indicates the maximum QPS while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 50", "kubeAPIBurst": "KubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 100", "bootstrapKubeConfigs": "BootstrapKubeConfigs defines the ordered list of bootstrap kubeconfigs. The order decides which bootstrap kubeconfig to use first when rebootstrap.\n\nWhen the agent loses the connection to the current hub over HubConnectionTimeoutSeconds, or the managedcluster CR is set `hubAcceptsClient=false` on the hub, the controller marks the related bootstrap kubeconfig as \"failed\".\n\nA failed bootstrapkubeconfig won't be used for the duration specified by SkipFailedBootstrapKubeConfigSeconds. 
But if the user updates the content of a failed bootstrapkubeconfig, the \"failed\" mark will be cleared.", + "registrationDriver": "This provides driver details required to register with hub", } func (RegistrationConfiguration) SwaggerDoc() map[string]string { return map_RegistrationConfiguration } +var map_RegistrationDriver = map[string]string{ + "authType": "Type of the authentication used by managedcluster to register as well as pull work from hub. Possible values are csr and awsirsa.", + "awsIrsa": "Contain the details required for registering with hub cluster (ie: an EKS cluster) using AWS IAM roles for service account. This is required only when the authType is awsirsa.", +} + +func (RegistrationDriver) SwaggerDoc() map[string]string { + return map_RegistrationDriver +} + var map_ServerURL = map[string]string{ "": "ServerURL represents the apiserver url and ca bundle that is accessible externally", "url": "URL is the url of apiserver endpoint of the managed cluster.",