diff --git a/deploy/cluster-manager/config/rbac/cluster_role.yaml b/deploy/cluster-manager/config/rbac/cluster_role.yaml
index ce37bcd7a..5bf5203a5 100644
--- a/deploy/cluster-manager/config/rbac/cluster_role.yaml
+++ b/deploy/cluster-manager/config/rbac/cluster_role.yaml
@@ -24,6 +24,7 @@ rules:
       - "work-controller-sa-kubeconfig"
       - "addon-manager-controller-sa-kubeconfig"
       - "external-hub-kubeconfig"
+      - "work-driver-config"
 - apiGroups: [""]
   resources: ["secrets"]
   verbs: ["create"]
diff --git a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml
index d4b866d84..3ce24d5bc 100644
--- a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml
+++ b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml
@@ -59,7 +59,7 @@ metadata:
     categories: Integration & Delivery,OpenShift Optional
     certified: "false"
     containerImage: quay.io/open-cluster-management/registration-operator:latest
-    createdAt: "2024-04-10T15:46:14Z"
+    createdAt: "2024-04-17T07:51:41Z"
     description: Manages the installation and upgrade of the ClusterManager.
     operators.operatorframework.io/builder: operator-sdk-v1.32.0
     operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
@@ -146,6 +146,7 @@ spec:
           - work-controller-sa-kubeconfig
           - addon-manager-controller-sa-kubeconfig
           - external-hub-kubeconfig
+          - work-driver-config
           resources:
           - secrets
           verbs:
diff --git a/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml
index 4896f275f..3ad8fb969 100644
--- a/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml
+++ b/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml
@@ -181,6 +181,51 @@ spec:
               description: RegistrationConfiguration contains the configuration
                 of registration
              properties:
+                bootstrapKubeConfigs:
+                  description: "BootstrapKubeConfigs defines the ordered list of
+                    bootstrap kubeconfigs. The order decides which bootstrap kubeconfig
+                    to use first when rebootstrapping. \n When the agent loses the
+                    connection to the current hub over HubConnectionTimeoutSeconds,
+                    or the managedcluster CR is set `hubAcceptsClient=false` on the
+                    hub, the controller marks the related bootstrap kubeconfig as
+                    \"failed\". \n A failed bootstrapkubeconfig won't be used for
+                    the duration specified by SkipFailedBootstrapKubeConfigSeconds.
+                    But if the user updates the content of a failed bootstrapkubeconfig,
+                    the \"failed\" mark will be cleared."
+                  properties:
+                    localSecretsConfig:
+                      description: LocalSecretsConfig includes a list of secrets
+                        that contain the kubeconfigs for ordered bootstrap kubeconfigs.
+                        The secrets must be in the same namespace where the agent
+                        controller runs.
+                      properties:
+                        hubConnectionTimeoutSeconds:
+                          default: 600
+                          description: HubConnectionTimeoutSeconds is used to set
+                            the timeout of connecting to the hub cluster. When the
+                            agent loses the connection to the hub over the timeout
+                            seconds, the agent does a rebootstrap. By default, it
+                            is 10 minutes.
+                          format: int32
+                          minimum: 180
+                          type: integer
+                        secretNames:
+                          description: SecretNames is a list of secret names. The
+                            secrets are in the same namespace where the agent controller
+                            runs.
+                          items:
+                            type: string
+                          type: array
+                      type: object
+                    type:
+                      default: None
+                      description: Type specifies the type of priority bootstrap
+                        kubeconfigs. By default, it is set to None, representing
+                        that no priority bootstrap kubeconfigs are set.
+                      enum:
+                      - None
+                      - LocalSecrets
+                      type: string
+                  type: object
                 clientCertExpirationSeconds:
                   description: clientCertExpirationSeconds represents the seconds
                     of a client certificate to expire. If it is not set or 0, the
diff --git a/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml b/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml
index 40d81f073..8701897dd 100644
--- a/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml
+++ b/deploy/klusterlet/olm-catalog/latest/manifests/operator.open-cluster-management.io_klusterlets.yaml
@@ -181,6 +181,51 @@ spec:
               description: RegistrationConfiguration contains the configuration
                 of registration
               properties:
+                bootstrapKubeConfigs:
+                  description: "BootstrapKubeConfigs defines the ordered list of
+                    bootstrap kubeconfigs. The order decides which bootstrap kubeconfig
+                    to use first when rebootstrapping. \n When the agent loses the
+                    connection to the current hub over HubConnectionTimeoutSeconds,
+                    or the managedcluster CR is set `hubAcceptsClient=false` on the
+                    hub, the controller marks the related bootstrap kubeconfig as
+                    \"failed\". \n A failed bootstrapkubeconfig won't be used for
+                    the duration specified by SkipFailedBootstrapKubeConfigSeconds.
+                    But if the user updates the content of a failed bootstrapkubeconfig,
+                    the \"failed\" mark will be cleared."
+                  properties:
+                    localSecretsConfig:
+                      description: LocalSecretsConfig includes a list of secrets
+                        that contain the kubeconfigs for ordered bootstrap kubeconfigs.
+                        The secrets must be in the same namespace where the agent
+                        controller runs.
+                      properties:
+                        hubConnectionTimeoutSeconds:
+                          default: 600
+                          description: HubConnectionTimeoutSeconds is used to set
+                            the timeout of connecting to the hub cluster. When the
+                            agent loses the connection to the hub over the timeout
+                            seconds, the agent does a rebootstrap. By default, it
+                            is 10 minutes.
+                          format: int32
+                          minimum: 180
+                          type: integer
+                        secretNames:
+                          description: SecretNames is a list of secret names. The
+                            secrets are in the same namespace where the agent controller
+                            runs.
+                          items:
+                            type: string
+                          type: array
+                      type: object
+                    type:
+                      default: None
+                      description: Type specifies the type of priority bootstrap
+                        kubeconfigs. By default, it is set to None, representing
+                        that no priority bootstrap kubeconfigs are set.
+                      enum:
+                      - None
+                      - LocalSecrets
+                      type: string
+                  type: object
                 clientCertExpirationSeconds:
                   description: clientCertExpirationSeconds represents the seconds
                     of a client certificate to expire.
If it is not set or 0, the diff --git a/go.mod b/go.mod index 48b8940d7..aef2c02a9 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( k8s.io/kube-aggregator v0.29.3 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556 - open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c + open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 diff --git a/go.sum b/go.sum index f3b3c3c33..17cc91d26 100644 --- a/go.sum +++ b/go.sum @@ -425,8 +425,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556 h1:X3vJEx9agC94l7SitpWZFDshISdL1niqVH0+diyqfJo= open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556/go.mod h1:HayKCznnlyW+0dUJQGj5sNR6i3tvylSySD3YnvZkBtY= -open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c h1:/iUoY6/PqBmcBq3v0+UBFvIcI39k/QPRGqpOv9XtDIc= -open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= +open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad h1:DB3GpK5vzbGu9ss13bfodi8pGTkPcpdcLvOPEPMptTk= +open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad/go.mod h1:yrNuMMpciXjXPnj2yznb6LTyrGliiTrFZAJDp/Ck3c4= open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 h1:zFmHuW+ztdfUUNslqNW+H1WEcfdEUQHoRDbmdajX340= open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= diff --git a/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml b/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml index 54b8d0a87..189420b42 100644 --- a/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml +++ b/manifests/cluster-manager/management/cluster-manager-manifestworkreplicaset-deployment.yaml @@ -46,7 +46,13 @@ spec: args: - "/work" - "manager" - - "--work-driver=kube" + {{ if .CloudEventsDriverEnabled }} + - "--work-driver={{ .WorkDriver }}" + {{ if ne .WorkDriver "kube" }} + - "--cloudevents-client-id=work-controller-$(POD_NAME)" + - "--work-driver-config=/var/run/secrets/work/config.yaml" + {{ end }} + {{ end }} {{ if .HostedMode }} - "--kubeconfig=/var/run/secrets/hub/kubeconfig" {{ end }} @@ -97,6 +103,11 @@ spec: name: kubeconfig readOnly: true {{ end }} + {{ if and .CloudEventsDriverEnabled (ne .WorkDriver "kube") }} + - mountPath: /var/run/secrets/work + name: workdriverconfig + readOnly: true + {{ end }} volumes: - name: tmpdir emptyDir: { } @@ -105,3 +116,8 @@ spec: secret: secretName: work-controller-sa-kubeconfig {{ end }} + {{ if and .CloudEventsDriverEnabled (ne .WorkDriver "kube") }} + - name: workdriverconfig + secret: + secretName: work-driver-config + {{ end }} diff --git a/manifests/config.go b/manifests/config.go index fddd15d95..4fd5782c9 100644 --- a/manifests/config.go +++ b/manifests/config.go @@ -19,6 +19,8 @@ type HubConfig struct { 
AddOnManagerImage string AddOnManagerEnabled bool MWReplicaSetEnabled bool + CloudEventsDriverEnabled bool + WorkDriver string AutoApproveUsers string // ResourceRequirementResourceType is the resource requirement resource type for the cluster manager managed containers. ResourceRequirementResourceType operatorapiv1.ResourceQosClass diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index 15d9e808e..1bfa0fde7 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -58,7 +58,8 @@ type clusterManagerController struct { mwctrEnabled, addonManagerEnabled bool) error generateHubClusterClients func(hubConfig *rest.Config) (kubernetes.Interface, apiextensionsclient.Interface, migrationclient.StorageVersionMigrationsGetter, error) - skipRemoveCRDs bool + skipRemoveCRDs bool + operatorNamespace string } type clusterManagerReconcile interface { @@ -83,6 +84,7 @@ func NewClusterManagerController( configMapInformer corev1informers.ConfigMapInformer, recorder events.Recorder, skipRemoveCRDs bool, + operatorNamespace string, ) factory.Controller { controller := &clusterManagerController{ operatorKubeClient: operatorKubeClient, @@ -97,6 +99,7 @@ func NewClusterManagerController( ensureSAKubeconfigs: ensureSAKubeconfigs, cache: resourceapply.NewResourceCache(), skipRemoveCRDs: skipRemoveCRDs, + operatorNamespace: operatorNamespace, } return factory.New().WithSync(controller.sync). @@ -132,6 +135,12 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f return err } + // default driver is kube + workDriver := operatorapiv1.WorkDriverTypeKube + if clusterManager.Spec.WorkConfiguration != nil && clusterManager.Spec.WorkConfiguration.WorkDriver != "" { + workDriver = clusterManager.Spec.WorkConfiguration.WorkDriver + } + // This config is used to render template of manifests. 
config := manifests.HubConfig{ ClusterManagerName: clusterManager.Name, @@ -150,6 +159,7 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f }, ResourceRequirementResourceType: helpers.ResourceType(clusterManager), ResourceRequirements: resourceRequirements, + WorkDriver: string(workDriver), } var registrationFeatureMsgs, workFeatureMsgs, addonFeatureMsgs string @@ -169,6 +179,7 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f } config.WorkFeatureGates, workFeatureMsgs = helpers.ConvertToFeatureGateFlags("Work", workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates) config.MWReplicaSetEnabled = helpers.FeatureGateEnabled(workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet) + config.CloudEventsDriverEnabled = helpers.FeatureGateEnabled(workFeatureGates, ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.CloudEventsDrivers) var addonFeatureGates []operatorapiv1.FeatureGate if clusterManager.Spec.AddOnManagerConfiguration != nil { @@ -211,6 +222,8 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f reconcilers := []clusterManagerReconcile{ &crdReconcile{cache: n.cache, recorder: n.recorder, hubAPIExtensionClient: hubApiExtensionClient, hubMigrationClient: hubMigrationClient, skipRemoveCRDs: n.skipRemoveCRDs}, + &secretReconcile{cache: n.cache, recorder: n.recorder, operatorKubeClient: n.operatorKubeClient, + hubKubeClient: hubClient, operatorNamespace: n.operatorNamespace}, &hubReoncile{cache: n.cache, recorder: n.recorder, hubKubeClient: hubClient}, &runtimeReconcile{cache: n.cache, recorder: n.recorder, hubKubeConfig: hubKubeConfig, hubKubeClient: hubClient, kubeClient: managementClient, ensureSAKubeconfigs: n.ensureSAKubeconfigs}, diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go index 6ef5bbe0f..2dc451bbe 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller_test.go @@ -31,6 +31,7 @@ import ( fakeoperatorlient "open-cluster-management.io/api/client/operator/clientset/versioned/fake" operatorinformers "open-cluster-management.io/api/client/operator/informers/externalversions" + ocmfeature "open-cluster-management.io/api/feature" operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/sdk-go/pkg/patcher" @@ -75,6 +76,7 @@ func newClusterManager(name string) *operatorapiv1.ClusterManager { }, WorkConfiguration: &operatorapiv1.WorkConfiguration{ FeatureGates: []operatorapiv1.FeatureGate{featureGate}, + WorkDriver: operatorapiv1.WorkDriverTypeKube, }, }, } @@ -299,6 +301,56 @@ func ensureObject(t *testing.T, object runtime.Object, hubCore *operatorapiv1.Cl } } +func TestSyncSecret(t *testing.T) { + operatorNamespace := metav1.NamespaceDefault + clusterManager := newClusterManager("testhub") + clusterManager.Spec.WorkConfiguration.FeatureGates = append(clusterManager.Spec.WorkConfiguration.FeatureGates, + operatorapiv1.FeatureGate{ + Feature: string(ocmfeature.CloudEventsDrivers), + Mode: operatorapiv1.FeatureGateModeTypeEnable, + }) + clusterManager.Spec.WorkConfiguration.WorkDriver = operatorapiv1.WorkDriverTypeGrpc + tc := newTestController(t, clusterManager) + 
tc.clusterManagerController.operatorNamespace = operatorNamespace
+	clusterManagerNamespace := helpers.ClusterManagerNamespace(clusterManager.Name, clusterManager.Spec.DeployOption.Mode)
+	setup(t, tc, nil)
+
+	syncContext := testingcommon.NewFakeSyncContext(t, "testhub")
+
+	err := tc.clusterManagerController.sync(ctx, syncContext)
+	if err != nil {
+		t.Fatalf("Expected no error when sync, %v", err)
+	}
+
+	workDriverConfig := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "work-driver-config",
+		},
+		Data: map[string][]byte{
+			"config.yaml": []byte("url: grpc.example.com:8443"),
+		},
+	}
+
+	if _, err = tc.managementKubeClient.CoreV1().Secrets(operatorNamespace).Create(ctx, workDriverConfig, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create work driver config secret: %v", err)
+	}
+
+	err = tc.clusterManagerController.sync(ctx, syncContext)
+	if err != nil {
+		t.Fatalf("Expected no error when sync, %v", err)
+	}
+
+	// TODO: add test for secret sync condition
+	syncedSecret, err := tc.hubKubeClient.CoreV1().Secrets(clusterManagerNamespace).Get(ctx, "work-driver-config", metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Failed to get synced work driver config secret: %v", err)
+	}
+
+	if string(syncedSecret.Data["config.yaml"]) != "url: grpc.example.com:8443" {
+		t.Fatalf("Expected secret data to be url: grpc.example.com:8443")
+	}
+}
+
 // TestSyncDeploy tests sync manifests of hub component
 func TestSyncDeploy(t *testing.T) {
 	clusterManager := newClusterManager("testhub")
diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_secret_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_secret_reconcile.go
new file mode 100644
index 000000000..558eb4407
--- /dev/null
+++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_secret_reconcile.go
@@ -0,0 +1,92 @@
+package clustermanagercontroller
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/client-go/kubernetes"
+
+	operatorapiv1 "open-cluster-management.io/api/operator/v1"
+
+	"open-cluster-management.io/ocm/manifests"
+	"open-cluster-management.io/ocm/pkg/operator/helpers"
+)
+
+const (
+	// workDriverConfig is the secret that contains the work driver configuration
+	workDriverConfig = "work-driver-config"
+)
+
+var (
+	// secretNames is the slice of secrets to be synced from source namespace to the target namespace
+	secretNames = []string{}
+)
+
+type secretReconcile struct {
+	operatorKubeClient kubernetes.Interface
+	hubKubeClient      kubernetes.Interface
+	operatorNamespace  string
+	cache              resourceapply.ResourceCache
+	recorder           events.Recorder
+}
+
+func (c *secretReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterManager,
+	config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) {
+	// create a local slice of secrets and copy the secretNames to avoid modifying the global variable
+	pendingSyncSecrets := make([]string, len(secretNames))
+	copy(pendingSyncSecrets, secretNames)
+	if config.CloudEventsDriverEnabled && config.WorkDriver != string(operatorapiv1.WorkDriverTypeKube) {
+		pendingSyncSecrets = append(pendingSyncSecrets, workDriverConfig)
+	}
+
+	var syncedErrs []error
+ for _, secretName := range pendingSyncSecrets { + // sync the secret to target namespace + if _, _, err := helpers.SyncSecret( + ctx, + c.operatorKubeClient.CoreV1(), + c.hubKubeClient.CoreV1(), + c.recorder, + c.operatorNamespace, + secretName, + config.ClusterManagerNamespace, + secretName, + []metav1.OwnerReference{}, + ); err != nil { + syncedErrs = append(syncedErrs, fmt.Errorf("failed to sync secret %s: %v", secretName, err)) + } + } + + if len(syncedErrs) > 0 { + // TODO: set condition to indicate the secret sync error(s) + return cm, reconcileStop, utilerrors.NewAggregate(syncedErrs) + } + + return cm, reconcileContinue, nil +} + +func (c *secretReconcile) clean(ctx context.Context, cm *operatorapiv1.ClusterManager, + config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { + // create a local slice of secrets and copy the secretNames to avoid modifying the global variable + pendingCleanSecrets := make([]string, len(secretNames)) + copy(pendingCleanSecrets, secretNames) + if config.CloudEventsDriverEnabled && config.WorkDriver != string(operatorapiv1.WorkDriverTypeKube) { + pendingCleanSecrets = append(pendingCleanSecrets, workDriverConfig) + } + for _, secretName := range pendingCleanSecrets { + if err := c.hubKubeClient.CoreV1().Secrets(config.ClusterManagerNamespace).Delete(ctx, + secretName, metav1.DeleteOptions{}); err != nil { + if errors.IsNotFound(err) { + return cm, reconcileContinue, nil + } + return cm, reconcileStop, fmt.Errorf("failed to delete secret %s: %v", secretName, err) + } + } + + return cm, reconcileContinue, nil +} diff --git a/pkg/operator/operators/clustermanager/options.go b/pkg/operator/operators/clustermanager/options.go index d929596e6..faf213e26 100644 --- a/pkg/operator/operators/clustermanager/options.go +++ b/pkg/operator/operators/clustermanager/options.go @@ -74,7 +74,9 @@ func (o *Options) RunClusterManagerOperator(ctx context.Context, controllerConte kubeInformer.Apps().V1().Deployments(), kubeInformer.Core().V1().ConfigMaps(), controllerContext.EventRecorder, - o.SkipRemoveCRDs) + o.SkipRemoveCRDs, + controllerContext.OperatorNamespace, + ) statusController := clustermanagerstatuscontroller.NewClusterManagerStatusController( operatorClient.OperatorV1().ClusterManagers(), diff --git a/pkg/work/hub/options.go b/pkg/work/hub/options.go index b98c15303..b33a19b65 100644 --- a/pkg/work/hub/options.go +++ b/pkg/work/hub/options.go @@ -13,7 +13,9 @@ type WorkHubManagerOptions struct { } func NewWorkHubManagerOptions() *WorkHubManagerOptions { - return &WorkHubManagerOptions{} + return &WorkHubManagerOptions{ + WorkDriver: "kube", + } } // AddFlags register and binds the default flags diff --git a/test/integration/operator/clustermanager_hosted_test.go b/test/integration/operator/clustermanager_hosted_test.go index 7c3238072..0cefabdc7 100644 --- a/test/integration/operator/clustermanager_hosted_test.go +++ b/test/integration/operator/clustermanager_hosted_test.go @@ -17,6 +17,7 @@ import ( "open-cluster-management.io/api/feature" operatorapiv1 "open-cluster-management.io/api/operator/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "open-cluster-management.io/ocm/pkg/operator/helpers" "open-cluster-management.io/ocm/test/integration/util" @@ -487,6 +488,147 @@ var _ = ginkgo.Describe("ClusterManager Hosted Mode", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) + ginkgo.It("should have expected work driver when work driver is updated", func() { + ginkgo.By("Update work 
driver to grpc") + gomega.Eventually(func() error { + clusterManager, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( + context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + featureGates := []operatorapiv1.FeatureGate{ + { + Feature: string(feature.ManifestWorkReplicaSet), + Mode: operatorapiv1.FeatureGateModeTypeEnable, + }, + { + Feature: string(feature.CloudEventsDrivers), + Mode: operatorapiv1.FeatureGateModeTypeEnable, + }, + } + if clusterManager.Spec.WorkConfiguration != nil { + for _, fg := range clusterManager.Spec.WorkConfiguration.FeatureGates { + if fg.Feature != string(feature.ManifestWorkReplicaSet) && + fg.Feature != string(feature.CloudEventsDrivers) { + featureGates = append(featureGates, fg) + } + } + } + clusterManager.Spec.WorkConfiguration = &operatorapiv1.WorkConfiguration{ + FeatureGates: featureGates, + WorkDriver: work.ConfigTypeGRPC, + } + _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update( + context.Background(), clusterManager, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // gomega.Eventually(func() error { + // actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( + // context.Background(), clusterManagerName, metav1.GetOptions{}) + // if err != nil { + // return err + // } + // if !meta.IsStatusConditionFalse(actual.Status.Conditions, "SecretSynced") { + // return fmt.Errorf("should get WorkDriverConfigSecretSynced condition false") + // } + // return nil + // }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + _, err := hostedKubeClient.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work-driver-config", + Namespace: "default", + }, + Data: map[string][]byte{ + "config.yaml": []byte("url: grpc.example.com:8443"), + }, + }, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // gomega.Eventually(func() error { + // actual, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( + // context.Background(), clusterManagerName, metav1.GetOptions{}) + // if err != nil { + // return err + // } + // if !meta.IsStatusConditionTrue(actual.Status.Conditions, "SecretSynced") { + // return fmt.Errorf("should get WorkDriverConfigSecretSynced condition true") + // } + // return nil + // }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + actual, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(context.Background(), + hubWorkControllerDeployment, metav1.GetOptions{}) + if err != nil { + return err + } + foundArg := false + for _, arg := range actual.Spec.Template.Spec.Containers[0].Args { + if arg == "--work-driver=grpc" { + foundArg = true + } + } + if !foundArg { + return fmt.Errorf("do not find the --work-driver=grpc args, got %v", actual.Spec.Template.Spec.Containers[0].Args) + } + foundVol := false + for _, vol := range actual.Spec.Template.Spec.Volumes { + if vol.Name == "workdriverconfig" && vol.Secret.SecretName == "work-driver-config" { + foundVol = true + } + } + if !foundVol { + return fmt.Errorf("do not find the workdriverconfig volume, got %v", actual.Spec.Template.Spec.Volumes) + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + gomega.Eventually(func() error { + workConfigSecret, err := hostedKubeClient.CoreV1().Secrets(hubNamespaceHosted).Get(context.Background(), + 
"work-driver-config", metav1.GetOptions{}) + if err != nil { + return err + } + if string(workConfigSecret.Data["config.yaml"]) != "url: grpc.example.com:8443" { + return fmt.Errorf("do not find the expected config.yaml, got %v", string(workConfigSecret.Data["config.yaml"])) + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + ginkgo.By("Revert work driver back to kube") + gomega.Eventually(func() error { + clusterManager, err := hostedOperatorClient.OperatorV1().ClusterManagers().Get( + context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + clusterManager.Spec.WorkConfiguration.WorkDriver = operatorapiv1.WorkDriverTypeKube + _, err = hostedOperatorClient.OperatorV1().ClusterManagers().Update( + context.Background(), clusterManager, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + gomega.Eventually(func() error { + actual, err := hostedKubeClient.AppsV1().Deployments(hubNamespaceHosted).Get(context.Background(), + hubWorkControllerDeployment, metav1.GetOptions{}) + if err != nil { + return err + } + for _, arg := range actual.Spec.Template.Spec.Containers[0].Args { + if arg == "--work-driver=grpc" { + return err + } + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + err = hostedKubeClient.CoreV1().Secrets("default").Delete(context.Background(), + "work-driver-config", metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + ginkgo.It("should have expected resource created/deleted successfully when feature gates AddOnManager enabled/disabled", func() { ginkgo.By("Disable AddOnManager feature gate") // Check addon manager disable mode diff --git a/test/integration/operator/clustermanager_test.go b/test/integration/operator/clustermanager_test.go index cf0b4a6d6..176046987 100644 --- a/test/integration/operator/clustermanager_test.go +++ b/test/integration/operator/clustermanager_test.go @@ -17,6 +17,7 @@ import ( "open-cluster-management.io/api/feature" operatorapiv1 "open-cluster-management.io/api/operator/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "open-cluster-management.io/ocm/pkg/operator/helpers" "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager" @@ -44,8 +45,9 @@ func startHubOperator(ctx context.Context, mode operatorapiv1.InstallMode) { o := &clustermanager.Options{} err := o.RunClusterManagerOperator(ctx, &controllercmd.ControllerContext{ - KubeConfig: config, - EventRecorder: util.NewIntegrationTestEventRecorder("integration"), + KubeConfig: config, + EventRecorder: util.NewIntegrationTestEventRecorder("integration"), + OperatorNamespace: metav1.NamespaceDefault, }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -461,6 +463,147 @@ var _ = ginkgo.Describe("ClusterManager Default Mode", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) + ginkgo.It("should have expected work driver when work driver is updated", func() { + ginkgo.By("Update work driver to grpc") + gomega.Eventually(func() error { + clusterManager, err := operatorClient.OperatorV1().ClusterManagers().Get( + context.Background(), clusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + featureGates := []operatorapiv1.FeatureGate{ + { + Feature: string(feature.ManifestWorkReplicaSet), + Mode: operatorapiv1.FeatureGateModeTypeEnable, + }, + { + Feature: string(feature.CloudEventsDrivers), + Mode: 
operatorapiv1.FeatureGateModeTypeEnable, + }, + } + if clusterManager.Spec.WorkConfiguration != nil { + for _, fg := range clusterManager.Spec.WorkConfiguration.FeatureGates { + if fg.Feature != string(feature.ManifestWorkReplicaSet) && + fg.Feature != string(feature.CloudEventsDrivers) { + featureGates = append(featureGates, fg) + } + } + } + clusterManager.Spec.WorkConfiguration = &operatorapiv1.WorkConfiguration{ + FeatureGates: featureGates, + WorkDriver: work.ConfigTypeGRPC, + } + _, err = operatorClient.OperatorV1().ClusterManagers().Update( + context.Background(), clusterManager, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + // gomega.Eventually(func() error { + // actual, err := operatorClient.OperatorV1().ClusterManagers().Get( + // context.Background(), clusterManagerName, metav1.GetOptions{}) + // if err != nil { + // return err + // } + // if !meta.IsStatusConditionFalse(actual.Status.Conditions, "SecretSynced") { + // return fmt.Errorf("should get WorkDriverConfigSecretSynced condition false") + // } + // return nil + // }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + _, err := kubeClient.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work-driver-config", + Namespace: "default", + }, + Data: map[string][]byte{ + "config.yaml": []byte("url: grpc.example.com:8443"), + }, + }, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // gomega.Eventually(func() error { + // actual, err := operatorClient.OperatorV1().ClusterManagers().Get( + // context.Background(), clusterManagerName, metav1.GetOptions{}) + // if err != nil { + // return err + // } + // if !meta.IsStatusConditionTrue(actual.Status.Conditions, "SecretSynced") { + // return fmt.Errorf("should get WorkDriverConfigSecretSynced condition true") + // } + // return nil + // }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + actual, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(), + hubWorkControllerDeployment, metav1.GetOptions{}) + if err != nil { + return err + } + foundArg := false + for _, arg := range actual.Spec.Template.Spec.Containers[0].Args { + if arg == "--work-driver=grpc" { + foundArg = true + } + } + if !foundArg { + return fmt.Errorf("do not find the --work-driver=grpc args, got %v", actual.Spec.Template.Spec.Containers[0].Args) + } + foundVol := false + for _, vol := range actual.Spec.Template.Spec.Volumes { + if vol.Name == "workdriverconfig" && vol.Secret.SecretName == "work-driver-config" { + foundVol = true + } + } + if !foundVol { + return fmt.Errorf("do not find the workdriverconfig volume, got %v", actual.Spec.Template.Spec.Volumes) + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + gomega.Eventually(func() error { + workConfigSecret, err := kubeClient.CoreV1().Secrets(hubNamespace).Get(context.Background(), + "work-driver-config", metav1.GetOptions{}) + if err != nil { + return err + } + if string(workConfigSecret.Data["config.yaml"]) != "url: grpc.example.com:8443" { + return fmt.Errorf("do not find the expected config.yaml, got %v", string(workConfigSecret.Data["config.yaml"])) + } + return nil + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil()) + + ginkgo.By("Revert work driver back to kube") + gomega.Eventually(func() error { + clusterManager, err := 
operatorClient.OperatorV1().ClusterManagers().Get(
+				context.Background(), clusterManagerName, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			clusterManager.Spec.WorkConfiguration.WorkDriver = operatorapiv1.WorkDriverTypeKube
+			_, err = operatorClient.OperatorV1().ClusterManagers().Update(
+				context.Background(), clusterManager, metav1.UpdateOptions{})
+			return err
+		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+		gomega.Eventually(func() error {
+			actual, err := kubeClient.AppsV1().Deployments(hubNamespace).Get(context.Background(),
+				hubWorkControllerDeployment, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			for _, arg := range actual.Spec.Template.Spec.Containers[0].Args {
+				if arg == "--work-driver=grpc" {
+					return fmt.Errorf("the --work-driver=grpc arg should have been removed, got %v",
+						actual.Spec.Template.Spec.Containers[0].Args)
+				}
+			}
+			return nil
+		}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeNil())
+
+		err = kubeClient.CoreV1().Secrets("default").Delete(context.Background(),
+			"work-driver-config", metav1.DeleteOptions{})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	})
+
 	ginkgo.It("should have expected resource created/deleted successfully when feature gates AddOnManager enabled/disabled", func() {
 		ginkgo.By("Check addon manager disable mode")
 		gomega.Eventually(func() error {
diff --git a/test/integration/operator/integration_suite_test.go b/test/integration/operator/integration_suite_test.go
index 605ef73c0..bea6e7b9a 100644
--- a/test/integration/operator/integration_suite_test.go
+++ b/test/integration/operator/integration_suite_test.go
@@ -139,6 +139,7 @@ var _ = ginkgo.BeforeSuite(func() {
 					Mode: operatorapiv1.InstallModeDefault,
 				},
 				WorkConfiguration: &operatorapiv1.WorkConfiguration{
+					WorkDriver: operatorapiv1.WorkDriverTypeKube,
 					FeatureGates: []operatorapiv1.FeatureGate{
 						{
 							Feature: string(feature.NilExecutorValidating),
@@ -177,10 +178,11 @@ var _ = ginkgo.BeforeSuite(func() {
 				},
 			},
 			WorkConfiguration: &operatorapiv1.WorkConfiguration{
+				WorkDriver: operatorapiv1.WorkDriverTypeKube,
 				FeatureGates: []operatorapiv1.FeatureGate{
 					{
-						Feature: "ManifestWorkReplicaSet",
-						Mode:    "Enable",
+						Feature: string(feature.ManifestWorkReplicaSet),
+						Mode:    operatorapiv1.FeatureGateModeTypeEnable,
 					},
 				},
 			},
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 24ce8cd97..4f8049615 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1507,7 +1507,7 @@ open-cluster-management.io/addon-framework/pkg/basecontroller/events
 open-cluster-management.io/addon-framework/pkg/basecontroller/factory
 open-cluster-management.io/addon-framework/pkg/index
 open-cluster-management.io/addon-framework/pkg/utils
-# open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c
+# open-cluster-management.io/api v0.13.1-0.20240419062633-aacb530ea4ad
 ## explicit; go 1.21
 open-cluster-management.io/api/addon/v1alpha1
 open-cluster-management.io/api/client/addon/clientset/versioned
diff --git a/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml
index c6c2b162a..88ff39ad6 100644
--- a/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml
+++ b/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml
@@ -118,6 +118,32 @@ spec:
       description: RegistrationConfiguration contains the configuration of registration
       type: object
       properties:
+        bootstrapKubeConfigs:
+          description: "BootstrapKubeConfigs defines the ordered list of bootstrap kubeconfigs. The order decides which bootstrap kubeconfig to use first when rebootstrapping. \n When the agent loses the connection to the current hub over HubConnectionTimeoutSeconds, or the managedcluster CR is set `hubAcceptsClient=false` on the hub, the controller marks the related bootstrap kubeconfig as \"failed\". \n A failed bootstrapkubeconfig won't be used for the duration specified by SkipFailedBootstrapKubeConfigSeconds. But if the user updates the content of a failed bootstrapkubeconfig, the \"failed\" mark will be cleared."
+          type: object
+          properties:
+            localSecretsConfig:
+              description: LocalSecretsConfig includes a list of secrets that contain the kubeconfigs for ordered bootstrap kubeconfigs. The secrets must be in the same namespace where the agent controller runs.
+              type: object
+              properties:
+                hubConnectionTimeoutSeconds:
+                  description: HubConnectionTimeoutSeconds is used to set the timeout of connecting to the hub cluster. When the agent loses the connection to the hub over the timeout seconds, the agent does a rebootstrap. By default, it is 10 minutes.
+                  type: integer
+                  format: int32
+                  default: 600
+                  minimum: 180
+                secretNames:
+                  description: SecretNames is a list of secret names. The secrets are in the same namespace where the agent controller runs.
+                  type: array
+                  items:
+                    type: string
+            type:
+              description: Type specifies the type of priority bootstrap kubeconfigs. By default, it is set to None, representing that no priority bootstrap kubeconfigs are set.
+              type: string
+              default: None
+              enum:
+              - None
+              - LocalSecrets
         clientCertExpirationSeconds:
           description: clientCertExpirationSeconds represents the seconds of a client certificate to expire. If it is not set or 0, the default duration seconds will be set by the hub cluster. If the value is larger than the max signing duration seconds set on the hub cluster, the max signing duration seconds will be set.
           type: integer
diff --git a/vendor/open-cluster-management.io/api/feature/feature.go b/vendor/open-cluster-management.io/api/feature/feature.go
index 8bd2aef95..45d90b425 100644
--- a/vendor/open-cluster-management.io/api/feature/feature.go
+++ b/vendor/open-cluster-management.io/api/feature/feature.go
@@ -64,6 +64,10 @@ const (
 	// of clusters selected by a placement. For more info check ManifestWorkReplicaSet APIs
 	ManifestWorkReplicaSet featuregate.Feature = "ManifestWorkReplicaSet"
 
+	// CloudEventsDrivers will enable the cloud events drivers (mqtt or grpc) for the hub controller,
+	// so that the controller can deliver manifestworks to the managed clusters via cloud events.
+	CloudEventsDrivers featuregate.Feature = "CloudEventsDrivers"
+
 	// RawFeedbackJsonString will make the work agent to return the feedback result as a json string if the result
 	// is not a scalar value.
 	RawFeedbackJsonString featuregate.Feature = "RawFeedbackJsonString"
@@ -100,6 +104,7 @@ var DefaultHubAddonManagerFeatureGates = map[featuregate.Feature]featuregate.Fea
 var DefaultHubWorkFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	NilExecutorValidating:  {Default: false, PreRelease: featuregate.Alpha},
 	ManifestWorkReplicaSet: {Default: false, PreRelease: featuregate.Alpha},
+	CloudEventsDrivers:     {Default: false, PreRelease: featuregate.Alpha},
 }
 
 // DefaultSpokeWorkFeatureGates consists of all known ocm work feature keys for work agent.
diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml
index 4896f275f..3ad8fb969 100644
--- a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml
+++ b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml
@@ -181,6 +181,51 @@ spec:
               description: RegistrationConfiguration contains the configuration
                 of registration
               properties:
+                bootstrapKubeConfigs:
+                  description: "BootstrapKubeConfigs defines the ordered list of
+                    bootstrap kubeconfigs. The order decides which bootstrap kubeconfig
+                    to use first when rebootstrapping. \n When the agent loses the
+                    connection to the current hub over HubConnectionTimeoutSeconds,
+                    or the managedcluster CR is set `hubAcceptsClient=false` on the
+                    hub, the controller marks the related bootstrap kubeconfig as
+                    \"failed\". \n A failed bootstrapkubeconfig won't be used for
+                    the duration specified by SkipFailedBootstrapKubeConfigSeconds.
+                    But if the user updates the content of a failed bootstrapkubeconfig,
+                    the \"failed\" mark will be cleared."
+                  properties:
+                    localSecretsConfig:
+                      description: LocalSecretsConfig includes a list of secrets
+                        that contain the kubeconfigs for ordered bootstrap kubeconfigs.
+                        The secrets must be in the same namespace where the agent
+                        controller runs.
+                      properties:
+                        hubConnectionTimeoutSeconds:
+                          default: 600
+                          description: HubConnectionTimeoutSeconds is used to set
+                            the timeout of connecting to the hub cluster. When the
+                            agent loses the connection to the hub over the timeout
+                            seconds, the agent does a rebootstrap. By default, it
+                            is 10 minutes.
+                          format: int32
+                          minimum: 180
+                          type: integer
+                        secretNames:
+                          description: SecretNames is a list of secret names. The
+                            secrets are in the same namespace where the agent controller
+                            runs.
+                          items:
+                            type: string
+                          type: array
+                      type: object
+                    type:
+                      default: None
+                      description: Type specifies the type of priority bootstrap
+                        kubeconfigs. By default, it is set to None, representing
+                        that no priority bootstrap kubeconfigs are set.
+                      enum:
+                      - None
+                      - LocalSecrets
+                      type: string
+                  type: object
                 clientCertExpirationSeconds:
                   description: clientCertExpirationSeconds represents the seconds
                     of a client certificate to expire. If it is not set or 0, the
diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go
index ea97392d5..0a1e0ae33 100644
--- a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go
+++ b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go
@@ -162,6 +162,52 @@ type RegistrationConfiguration struct {
 	// +optional
 	// +kubebuilder:default:=100
 	KubeAPIBurst int32 `json:"kubeAPIBurst,omitempty"`
+
+	// BootstrapKubeConfigs defines the ordered list of bootstrap kubeconfigs. The order decides which bootstrap kubeconfig to use first when rebootstrapping.
+	//
+	// When the agent loses the connection to the current hub over HubConnectionTimeoutSeconds, or the managedcluster CR
+	// is set `hubAcceptsClient=false` on the hub, the controller marks the related bootstrap kubeconfig as "failed".
+	//
+	// A failed bootstrapkubeconfig won't be used for the duration specified by SkipFailedBootstrapKubeConfigSeconds.
+	// But if the user updates the content of a failed bootstrapkubeconfig, the "failed" mark will be cleared.
+	// +optional
+	BootstrapKubeConfigs BootstrapKubeConfigs `json:"bootstrapKubeConfigs,omitempty"`
+}
+
+type TypeBootstrapKubeConfigs string
+
+const (
+	LocalSecrets TypeBootstrapKubeConfigs = "LocalSecrets"
+	None         TypeBootstrapKubeConfigs = "None"
+)
+
+type BootstrapKubeConfigs struct {
+	// Type specifies the type of priority bootstrap kubeconfigs.
+	// By default, it is set to None, representing that no priority bootstrap kubeconfigs are set.
+	// +required
+	// +kubebuilder:default:=None
+	// +kubebuilder:validation:Enum=None;LocalSecrets
+	Type TypeBootstrapKubeConfigs `json:"type,omitempty"`
+
+	// LocalSecretsConfig includes a list of secrets that contain the kubeconfigs for ordered bootstrap kubeconfigs.
+	// The secrets must be in the same namespace where the agent controller runs.
+	// +optional
+	LocalSecrets LocalSecretsConfig `json:"localSecretsConfig,omitempty"`
+}
+
+type LocalSecretsConfig struct {
+	// SecretNames is a list of secret names. The secrets are in the same namespace where the agent controller runs.
+	// +required
+	// +kubebuilder:validation:minItems=2
+	SecretNames []string `json:"secretNames"`
+
+	// HubConnectionTimeoutSeconds is used to set the timeout of connecting to the hub cluster.
+	// When the agent loses the connection to the hub over the timeout seconds, the agent does a rebootstrap.
+	// By default, it is 10 minutes.
+	// +optional
+	// +kubebuilder:default:=600
+	// +kubebuilder:validation:Minimum=180
+	HubConnectionTimeoutSeconds int32 `json:"hubConnectionTimeoutSeconds,omitempty"`
 }
 
 type WorkAgentConfiguration struct {
diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go
index 7d4983e2b..fed805f8d 100644
--- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go
@@ -163,6 +163,15 @@ func (WorkConfiguration) SwaggerDoc() map[string]string {
 	return map_WorkConfiguration
 }
 
+var map_BootstrapKubeConfigs = map[string]string{
+	"type":               "Type specifies the type of priority bootstrap kubeconfigs. By default, it is set to None, representing that no priority bootstrap kubeconfigs are set.",
+	"localSecretsConfig": "LocalSecretsConfig includes a list of secrets that contain the kubeconfigs for ordered bootstrap kubeconfigs. The secrets must be in the same namespace where the agent controller runs.",
+}
+
+func (BootstrapKubeConfigs) SwaggerDoc() map[string]string {
+	return map_BootstrapKubeConfigs
+}
+
 var map_HubApiServerHostAlias = map[string]string{
 	"":   "HubApiServerHostAlias holds the mapping between IP and hostname that will be injected as an entry in the pod's hosts file.",
 	"ip": "IP address of the host file entry.",
@@ -235,12 +244,22 @@ func (KlusterletStatus) SwaggerDoc() map[string]string {
 	return map_KlusterletStatus
 }
 
+var map_LocalSecretsConfig = map[string]string{
+	"secretNames":                 "SecretNames is a list of secret names. The secrets are in the same namespace where the agent controller runs.",
+	"hubConnectionTimeoutSeconds": "HubConnectionTimeoutSeconds is used to set the timeout of connecting to the hub cluster. When the agent loses the connection to the hub over the timeout seconds, the agent does a rebootstrap. By default, it is 10 minutes.",
+}
+
+func (LocalSecretsConfig) SwaggerDoc() map[string]string {
+	return map_LocalSecretsConfig
+}
+
 var map_RegistrationConfiguration = map[string]string{
 	"clientCertExpirationSeconds": "clientCertExpirationSeconds represents the seconds of a client certificate to expire. If it is not set or 0, the default duration seconds will be set by the hub cluster. If the value is larger than the max signing duration seconds set on the hub cluster, the max signing duration seconds will be set.",
 	"featureGates":                "FeatureGates represents the list of feature gates for registration If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.",
 	"clusterAnnotations":          "ClusterAnnotations is annotations with the reserve prefix \"agent.open-cluster-management.io\" set on ManagedCluster when creating only, other actors can update it afterwards.",
 	"kubeAPIQPS":                  "KubeAPIQPS indicates the maximum QPS while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 50",
 	"kubeAPIBurst":                "KubeAPIBurst indicates the maximum burst of the throttle while talking with apiserver of hub cluster from the spoke cluster. If it is set empty, use the default value: 100",
+	"bootstrapKubeConfigs":        "BootstrapKubeConfigs defines the ordered list of bootstrap kubeconfigs. The order decides which bootstrap kubeconfig to use first when rebootstrapping.\n\nWhen the agent loses the connection to the current hub over HubConnectionTimeoutSeconds, or the managedcluster CR is set `hubAcceptsClient=false` on the hub, the controller marks the related bootstrap kubeconfig as \"failed\".\n\nA failed bootstrapkubeconfig won't be used for the duration specified by SkipFailedBootstrapKubeConfigSeconds. But if the user updates the content of a failed bootstrapkubeconfig, the \"failed\" mark will be cleared.",
 }
 
 func (RegistrationConfiguration) SwaggerDoc() map[string]string {
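On the klusterlet side, the new `bootstrapKubeConfigs` API added above could be exercised roughly as follows. This is a sketch only: the secret names are invented for illustration, while the field names, the `LocalSecrets` type, the two-secret minimum, and the 600-second default come from the CRD and Go types in this diff:

```yaml
apiVersion: operator.open-cluster-management.io/v1
kind: Klusterlet
metadata:
  name: klusterlet
spec:
  registrationConfiguration:
    bootstrapKubeConfigs:
      type: LocalSecrets                   # the default "None" disables priority bootstrap kubeconfigs
      localSecretsConfig:
        # minItems=2; tried in order when the agent rebootstraps
        secretNames:
        - primary-hub-kubeconfig           # hypothetical secrets in the agent namespace
        - backup-hub-kubeconfig
        hubConnectionTimeoutSeconds: 600   # default; must be >= 180
```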