diff --git a/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index 8cc24234b..8abdb8e37 100644 --- a/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -323,6 +323,8 @@ spec: type: string type: object workConfiguration: + default: + workDriver: kube description: WorkConfiguration contains the configuration of work properties: featureGates: @@ -356,6 +358,22 @@ spec: - feature type: object type: array + workDriver: + default: kube + description: "WorkDriver represents the type of work driver. Possible + values are \"kube\", \"mqtt\", or \"grpc\". If not provided, + the default value is \"kube\". If set to non-\"kube\" drivers, + the klusterlet needs to use the same driver, and the driver configuration + must be provided in a secret named \"work-driver-config\" in + the namespace where the cluster manager is running, adhering + to the following structure: config.yaml: | + \n For detailed driver configuration, please refer to the sdk-go + documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers" + enum: + - kube + - mqtt + - grpc + type: string type: object workImagePullSpec: default: quay.io/open-cluster-management/work diff --git a/deploy/cluster-manager/olm-catalog/latest/manifests/operator.open-cluster-management.io_clustermanagers.yaml b/deploy/cluster-manager/olm-catalog/latest/manifests/operator.open-cluster-management.io_clustermanagers.yaml index 12dd60949..50c9c87b4 100644 --- a/deploy/cluster-manager/olm-catalog/latest/manifests/operator.open-cluster-management.io_clustermanagers.yaml +++ b/deploy/cluster-manager/olm-catalog/latest/manifests/operator.open-cluster-management.io_clustermanagers.yaml @@ -323,6 +323,8 @@ spec: type: string type: object workConfiguration: + default: + workDriver: kube description: WorkConfiguration contains the configuration of work properties: featureGates: @@ -356,6 +358,22 @@ spec: - feature type: object type: array + workDriver: + default: kube + description: "WorkDriver represents the type of work driver. Possible + values are \"kube\", \"mqtt\", or \"grpc\". If not provided, + the default value is \"kube\". If set to non-\"kube\" drivers, + the klusterlet needs to use the same driver,
and the driver configuration + must be provided in a secret named \"work-driver-config\" in + the namespace where the cluster manager is running, adhering + to the following structure: config.yaml: | + \n For detailed driver configuration, please refer to the sdk-go + documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers" + enum: + - kube + - mqtt + - grpc + type: string type: object workImagePullSpec: default: quay.io/open-cluster-management/work diff --git a/go.mod b/go.mod index d27dfc6de..63aa2eb7a 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( k8s.io/kube-aggregator v0.29.3 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/addon-framework v0.9.1 - open-cluster-management.io/api v0.13.0 + open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c open-cluster-management.io/sdk-go v0.13.1-0.20240313075541-00a94671ced1 sigs.k8s.io/controller-runtime v0.17.2 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 diff --git a/go.sum b/go.sum index dd6551150..1426c1650 100644 --- a/go.sum +++ b/go.sum @@ -425,8 +425,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/addon-framework v0.9.1 h1:m6n/W29G/4KzMx+8mgC9P/ybuiyNHVy+O6AHEaWbfQg= open-cluster-management.io/addon-framework v0.9.1/go.mod h1:OEIFCEXhZKO/Grv08CB0T+TGzS0bLshw4G9u7Vw8dw0= -open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= -open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= +open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c h1:/iUoY6/PqBmcBq3v0+UBFvIcI39k/QPRGqpOv9XtDIc= +open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= open-cluster-management.io/sdk-go v0.13.1-0.20240313075541-00a94671ced1 h1:s3dJdi1eol+/8ek6JQuaEuoGPkK/wRyM9zowqzKHPDY= open-cluster-management.io/sdk-go v0.13.1-0.20240313075541-00a94671ced1/go.mod h1:sq+amR9Ls9JzMP5dypvlCx4jIGfDg45gicS67Z/MnlI= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= diff --git a/pkg/operator/helpers/helpers.go b/pkg/operator/helpers/helpers.go index e7be599c9..6a3c0e4a0 100644 --- a/pkg/operator/helpers/helpers.go +++ b/pkg/operator/helpers/helpers.go @@ -3,6 +3,7 @@ package helpers import ( "context" "fmt" + "os" "reflect" "strings" @@ -37,6 +38,7 @@ import ( apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + "open-cluster-management.io/api/feature" operatorapiv1 "open-cluster-management.io/api/operator/v1" ) @@ -49,16 +51,24 @@ const ( FeatureGatesReasonInvalidExisting = "InvalidFeatureGatesExisting" ) +const ( + // ImagePullSecret is the image pull secret for operator components, which is synced from the operator ns to hub/spoke/addon ns. 
+ ImagePullSecret = "open-cluster-management-image-pull-credentials" + + // DefaultComponentNamespace is the default namespace in which the operator is deployed + DefaultComponentNamespace = "open-cluster-management" +) + var ( genericScheme = runtime.NewScheme() genericCodecs = serializer.NewCodecFactory(genericScheme) genericCodec = genericCodecs.UniversalDeserializer() DefaultHubRegistrationFeatureGates = []operatorapiv1.FeatureGate{ - {Feature: "DefaultClusterSet", Mode: operatorapiv1.FeatureGateModeTypeEnable}, + {Feature: string(feature.DefaultClusterSet), Mode: operatorapiv1.FeatureGateModeTypeEnable}, } DefaultSpokeRegistrationFeatureGates = []operatorapiv1.FeatureGate{ - {Feature: "AddonManagement", Mode: operatorapiv1.FeatureGateModeTypeEnable}, + {Feature: string(feature.AddonManagement), Mode: operatorapiv1.FeatureGateModeTypeEnable}, } ) @@ -795,3 +805,12 @@ func IsSingleton(mode operatorapiv1.InstallMode) bool { func IsHosted(mode operatorapiv1.InstallMode) bool { return mode == operatorapiv1.InstallModeHosted || mode == operatorapiv1.InstallModeSingletonHosted } + +func GetOperatorNamespace() string { + operatorNamespace := DefaultComponentNamespace + nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err == nil { + operatorNamespace = string(nsBytes) + } + return operatorNamespace +} diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go index c9cbda8db..15d9e808e 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_controller.go @@ -38,9 +38,7 @@ import ( ) const ( - clusterManagerFinalizer = "operator.open-cluster-management.io/cluster-manager-cleanup" - clusterManagerApplied = "Applied" - clusterManagerProgressing = "Progressing" + clusterManagerFinalizer = "operator.open-cluster-management.io/cluster-manager-cleanup" defaultWebhookPort = int32(9443) clusterManagerReSyncTime = 5 * time.Second @@ -267,9 +265,9 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f clusterManager.Status.ObservedGeneration = clusterManager.Generation if len(errs) == 0 { meta.SetStatusCondition(&clusterManager.Status.Conditions, metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionTrue, - Reason: "ClusterManagerApplied", + Reason: operatorapiv1.ReasonClusterManagerApplied, Message: "Components of cluster manager are applied", }) } else { diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go index d868fc618..4c83c0174 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_crd_reconcile.go @@ -80,9 +80,9 @@ func (c *crdReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM }, hubCRDResourceFiles...); err != nil { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: 
metav1.ConditionFalse, - Reason: "CRDApplyFaild", + Reason: operatorapiv1.ReasonClusterManagerCRDApplyFailed, Message: fmt.Sprintf("Failed to apply crd: %v", err), }) return cm, reconcileStop, err diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go index fc8e75424..f54708a21 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_hub_reconcile.go @@ -129,7 +129,7 @@ func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterMa if len(appliedErrs) > 0 { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionFalse, Reason: "HubResourceApplyFailed", Message: fmt.Sprintf("Failed to apply hub resources: %v", utilerrors.NewAggregate(appliedErrs)), diff --git a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go index 6f80a9d22..eff8683e1 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_runtime_reconcile.go @@ -83,9 +83,9 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus config.MWReplicaSetEnabled, config.AddOnManagerEnabled) if err != nil { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionFalse, - Reason: "ServiceAccountSyncFailed", + Reason: operatorapiv1.ReasonServiceAccountSyncFailed, Message: fmt.Sprintf("Failed to sync service account: %v", err), }) return cm, reconcileStop, err @@ -158,25 +158,25 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus if len(progressingDeployments) > 0 { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerProgressing, + Type: operatorapiv1.ConditionProgressing, Status: metav1.ConditionTrue, - Reason: "ClusterManagerDeploymentRolling", + Reason: operatorapiv1.ReasonDeploymentRolling, Message: fmt.Sprintf("Deployments %s is still rolling", strings.Join(progressingDeployments, ",")), }) } else { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerProgressing, + Type: operatorapiv1.ConditionProgressing, Status: metav1.ConditionFalse, - Reason: "ClusterManagerUpToDate", + Reason: operatorapiv1.ReasonUpToDate, Message: "Components of cluster manager are up to date", }) } if len(appliedErrs) > 0 { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionFalse, - Reason: "RuntimeResourceApplyFailed", + Reason: operatorapiv1.ReasonRuntimeResourceApplyFailed, Message: fmt.Sprintf("Failed to apply runtime resources: %v", utilerrors.NewAggregate(appliedErrs)), }) return cm, reconcileStop, utilerrors.NewAggregate(appliedErrs) diff --git 
a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go index 9b0b0a4c4..887e2c180 100644 --- a/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go +++ b/pkg/operator/operators/clustermanager/controllers/clustermanagercontroller/clustermanager_webhook_reconcile.go @@ -48,7 +48,7 @@ func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) { var appliedErrs []error - if !meta.IsStatusConditionFalse(cm.Status.Conditions, clusterManagerProgressing) { + if !meta.IsStatusConditionFalse(cm.Status.Conditions, operatorapiv1.ConditionProgressing) { return cm, reconcileStop, commonhelpers.NewRequeueError("Deployment is not ready", clusterManagerReSyncTime) } @@ -81,9 +81,9 @@ func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus if len(appliedErrs) > 0 { meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionFalse, - Reason: "WebhookApplyFailed", + Reason: operatorapiv1.ReasonWebhookApplyFailed, Message: fmt.Sprintf("Failed to apply webhook resources: %v", utilerrors.NewAggregate(appliedErrs)), }) return cm, reconcileStop, utilerrors.NewAggregate(appliedErrs) diff --git a/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go index 584e58deb..434162d59 100644 --- a/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller.go @@ -20,10 +20,10 @@ import ( operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1" operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1" + operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/pkg/common/queue" "open-cluster-management.io/ocm/pkg/operator/helpers" - "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller" ) var ( @@ -78,7 +78,7 @@ func (c *crdStatusController) sync(ctx context.Context, controllerContext factor } // need to wait storage version migrations succeed. 
- if succeeded := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, migrationcontroller.MigrationSucceeded); !succeeded { + if succeeded := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, operatorapiv1.ConditionMigrationSucceeded); !succeeded { controllerContext.Queue().AddRateLimited(clusterManagerName) klog.V(4).Info("Wait storage version migration succeed.") return nil diff --git a/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go index e0815c432..42c8bc22d 100644 --- a/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/crdstatuccontroller/crd_status_controller_test.go @@ -21,7 +21,6 @@ import ( operatorapiv1 "open-cluster-management.io/api/operator/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" - "open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller" ) func TestSync(t *testing.T) { @@ -29,7 +28,7 @@ func TestSync(t *testing.T) { tc := newTestController(t, clusterManager) syncContext := testingcommon.NewFakeSyncContext(t, "testhub") - //Do not support migration + // Do not support migration err := tc.sync(context.Background(), syncContext) if err != nil { t.Fatalf("Expected no error when sync, %v", err) @@ -38,7 +37,7 @@ func TestSync(t *testing.T) { // migration succeed clusterManager.Status.Conditions = []metav1.Condition{ { - Type: migrationcontroller.MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionTrue, }, } diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go index 6ab3b4efd..04219bb3e 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go @@ -3,7 +3,6 @@ package migrationcontroller import ( "context" "fmt" - "time" "github.com/openshift/library-go/pkg/assets" "github.com/openshift/library-go/pkg/controller/factory" @@ -53,11 +52,7 @@ var ( ) const ( - clusterManagerApplied = "Applied" - MigrationSucceeded = "MigrationSucceeded" - migrationRequestCRDName = "storageversionmigrations.migration.k8s.io" - reSyncTime = time.Second * 5 ) type crdMigrationController struct { @@ -134,9 +129,9 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac if !supported { newClusterManager := clusterManager.DeepCopy() meta.SetStatusCondition(&newClusterManager.Status.Conditions, metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, - Reason: "StorageVersionMigrationFailed", + Reason: operatorapiv1.ReasonStorageVersionMigrationFailed, Message: "Do not support StorageVersionMigration", }) _, err = c.patcher.PatchStatus(ctx, newClusterManager, newClusterManager.Status, clusterManager.Status) @@ -144,7 +139,7 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac } // do not apply storage version migrations until other resources are applied - if applied := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, clusterManagerApplied); !applied { + if applied := 
meta.IsStatusConditionTrue(clusterManager.Status.Conditions, operatorapiv1.ConditionClusterManagerApplied); !applied { controllerContext.Queue().AddRateLimited(clusterManagerName) return nil } @@ -162,7 +157,7 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac return } - //If migration not succeed, wait for all StorageVersionMigrations succeed. + // If the migration has not succeeded, wait for all StorageVersionMigrations to succeed. if migrationCond.Status != metav1.ConditionTrue { klog.V(4).Infof("Wait all StorageVersionMigrations succeed. migrationCond: %v. error: %v", migrationCond, err) controllerContext.Queue().AddRateLimited(clusterManagerName) @@ -176,9 +171,9 @@ c.recorder.Warningf("StorageVersionMigrationFailed", "Failed to check CRD current storage version. %v", err) migrationCond = metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, - Reason: "StorageVersionMigrationFailed", + Reason: operatorapiv1.ReasonStorageVersionMigrationFailed, Message: fmt.Sprintf("Failed to check CRD current storage version. %v", err), } return nil @@ -189,9 +184,9 @@ klog.Errorf("Failed to apply StorageVersionMigrations. %v", err) migrationCond = metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, - Reason: "StorageVersionMigrationFailed", + Reason: operatorapiv1.ReasonStorageVersionMigrationFailed, Message: fmt.Sprintf("Failed to create StorageVersionMigrations. %v", err), } return err @@ -308,9 +303,9 @@ func syncStorageVersionMigrationsCondition(ctx context.Context, toSyncMigrations migrationStatusCondition := getStorageVersionMigrationStatusCondition(existing) if migrationStatusCondition == nil { return metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, - Reason: "StorageVersionMigrationProcessing", + Reason: operatorapiv1.ReasonStorageVersionMigrationProcessing, Message: fmt.Sprintf("Wait StorageVersionMigration %v succeed.", existing.Name), }, nil } @@ -319,14 +314,14 @@ continue case migrationv1alpha1.MigrationFailed: return metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, Reason: fmt.Sprintf("StorageVersionMigration Failed. %v", migrationStatusCondition.Reason), Message: fmt.Sprintf("Failed to wait StorageVersionMigration %v succeed. %v", existing.Name, migrationStatusCondition.Message), }, nil case migrationv1alpha1.MigrationRunning: return metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, Reason: fmt.Sprintf("StorageVersionMigration Running. %v", migrationStatusCondition.Reason), Message: fmt.Sprintf("Wait StorageVersionMigration %v succeed. 
%v", existing.Name, migrationStatusCondition.Message), @@ -334,9 +329,9 @@ func syncStorageVersionMigrationsCondition(ctx context.Context, toSyncMigrations } } return metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionTrue, - Reason: "StorageVersionMigrationSucceed", + Reason: operatorapiv1.ReasonStorageVersionMigrationSucceed, Message: "All StorageVersionMigrations Succeed", }, nil } diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go index 560db41ec..4353f07a1 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go @@ -293,7 +293,7 @@ func TestSyncStorageVersionMigrationsCondition(t *testing.T) { }, wantErr: false, want: metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, }, }, @@ -329,7 +329,7 @@ func TestSyncStorageVersionMigrationsCondition(t *testing.T) { }, wantErr: false, want: metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, }, }, @@ -365,7 +365,7 @@ func TestSyncStorageVersionMigrationsCondition(t *testing.T) { }, wantErr: false, want: metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, }, }, @@ -401,7 +401,7 @@ func TestSyncStorageVersionMigrationsCondition(t *testing.T) { }, wantErr: false, want: metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionFalse, }, }, @@ -437,7 +437,7 @@ func TestSyncStorageVersionMigrationsCondition(t *testing.T) { }, wantErr: false, want: metav1.Condition{ - Type: MigrationSucceeded, + Type: operatorapiv1.ConditionMigrationSucceeded, Status: metav1.ConditionTrue, }, }, @@ -464,7 +464,7 @@ func TestSync(t *testing.T) { tc, client := newTestController(t, clusterManager) syncContext := testingcommon.NewFakeSyncContext(t, "testhub") - //Do not support migration + // Do not support migration err := tc.sync(context.Background(), syncContext) if err != nil { t.Fatalf("Expected no error when sync, %v", err) @@ -475,13 +475,13 @@ func TestSync(t *testing.T) { t.Fatalf("Expected no error when sync, %v", err) } - if notsucceeded := meta.IsStatusConditionFalse(clusterManager.Status.Conditions, MigrationSucceeded); !notsucceeded { + if notsucceeded := meta.IsStatusConditionFalse(clusterManager.Status.Conditions, operatorapiv1.ConditionMigrationSucceeded); !notsucceeded { t.Errorf("Error to sync clusterManager.Status.Conditions %v", clusterManager.Status.Conditions) } // all resources applied clusterManager.Status.Conditions = []metav1.Condition{ { - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionTrue, }, } @@ -498,7 +498,7 @@ func TestSync(t *testing.T) { if err != nil { t.Fatalf("Expected no error when sync, %v", err) } - if notsucceeded := meta.IsStatusConditionFalse(clusterManager.Status.Conditions, MigrationSucceeded); !notsucceeded { + if notsucceeded := meta.IsStatusConditionFalse(clusterManager.Status.Conditions, operatorapiv1.ConditionMigrationSucceeded); !notsucceeded { t.Errorf("Error to sync 
clusterManager.Status.Conditions %v", clusterManager.Status.Conditions) } } diff --git a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go index d9a2605fb..995cf022b 100644 --- a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller.go @@ -23,12 +23,6 @@ import ( "open-cluster-management.io/ocm/pkg/operator/helpers" ) -const ( - registrationDegraded = "HubRegistrationDegraded" - placementDegraded = "HubPlacementDegraded" - clusterManagerApplied = "Applied" -) - type clusterManagerStatusController struct { deploymentLister appslister.DeploymentLister patcher patcher.Patcher[*operatorapiv1.ClusterManager, operatorapiv1.ClusterManagerSpec, operatorapiv1.ClusterManagerStatus] @@ -73,7 +67,7 @@ func (s *clusterManagerStatusController) sync(ctx context.Context, controllerCon return err } - if meta.FindStatusCondition(clusterManager.Status.Conditions, clusterManagerApplied) == nil { + if meta.FindStatusCondition(clusterManager.Status.Conditions, operatorapiv1.ConditionClusterManagerApplied) == nil { return nil } @@ -98,27 +92,27 @@ func (s *clusterManagerStatusController) updateStatusOfRegistration(clusterManag registrationDeployment, err := s.deploymentLister.Deployments(clusterManagerNamespace).Get(registrationDeploymentName) if err != nil { return metav1.Condition{ - Type: registrationDegraded, + Type: operatorapiv1.ConditionHubRegistrationDegraded, Status: metav1.ConditionTrue, - Reason: "GetRegistrationDeploymentFailed", + Reason: operatorapiv1.ReasonGetRegistrationDeploymentFailed, Message: fmt.Sprintf("Failed to get registration deployment %q %q: %v", clusterManagerNamespace, registrationDeploymentName, err), } } if unavailablePod := helpers.NumOfUnavailablePod(registrationDeployment); unavailablePod > 0 { return metav1.Condition{ - Type: registrationDegraded, + Type: operatorapiv1.ConditionHubRegistrationDegraded, Status: metav1.ConditionTrue, - Reason: "UnavailableRegistrationPod", + Reason: operatorapiv1.ReasonUnavailableRegistrationPod, Message: fmt.Sprintf("%v of requested instances are unavailable of registration deployment %q %q", unavailablePod, clusterManagerNamespace, registrationDeploymentName), } } return metav1.Condition{ - Type: registrationDegraded, + Type: operatorapiv1.ConditionHubRegistrationDegraded, Status: metav1.ConditionFalse, - Reason: "RegistrationFunctional", + Reason: operatorapiv1.ReasonRegistrationFunctional, Message: "Registration is managing credentials", } } @@ -130,27 +124,27 @@ func (s *clusterManagerStatusController) updateStatusOfPlacement(clusterManagerN placementDeployment, err := s.deploymentLister.Deployments(clusterManagerNamespace).Get(placementDeploymentName) if err != nil { return metav1.Condition{ - Type: placementDegraded, + Type: operatorapiv1.ConditionHubPlacementDegraded, Status: metav1.ConditionTrue, - Reason: "GetPlacementDeploymentFailed", + Reason: operatorapiv1.ReasonGetPlacementDeploymentFailed, Message: fmt.Sprintf("Failed to get placement deployment %q %q: %v", clusterManagerNamespace, placementDeploymentName, err), } } if unavailablePod := helpers.NumOfUnavailablePod(placementDeployment); unavailablePod > 0 { return metav1.Condition{ - Type: placementDegraded, + Type: operatorapiv1.ConditionHubPlacementDegraded, Status: 
metav1.ConditionTrue, - Reason: "UnavailablePlacementPod", + Reason: operatorapiv1.ReasonUnavailablePlacementPod, Message: fmt.Sprintf("%v of requested instances are unavailable of placement deployment %q %q", unavailablePod, clusterManagerNamespace, placementDeploymentName), } } return metav1.Condition{ - Type: placementDegraded, + Type: operatorapiv1.ConditionHubPlacementDegraded, Status: metav1.ConditionFalse, - Reason: "PlacementFunctional", + Reason: operatorapiv1.ReasonPlacementFunctional, Message: "Placement is scheduling placement decisions", } } diff --git a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go index 8603ec824..66b8979df 100644 --- a/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/statuscontroller/clustermanager_status_controller_test.go @@ -36,7 +36,7 @@ func newClusterManager() *operatorapiv1.ClusterManager { Status: operatorapiv1.ClusterManagerStatus{ Conditions: []metav1.Condition{ { - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionTrue, }, }, @@ -76,7 +76,7 @@ func newPlacementDeployment(desiredReplica, availableReplica int32) *appsv1.Depl func TestSyncStatus(t *testing.T) { appliedCond := metav1.Condition{ - Type: clusterManagerApplied, + Type: operatorapiv1.ConditionClusterManagerApplied, Status: metav1.ConditionTrue, } cases := []struct { @@ -119,8 +119,8 @@ func TestSyncStatus(t *testing.T) { if err != nil { t.Fatal(err) } - expectedCondition1 := testinghelper.NamedCondition(registrationDegraded, "GetRegistrationDeploymentFailed", metav1.ConditionTrue) - expectedCondition2 := testinghelper.NamedCondition(placementDegraded, "UnavailablePlacementPod", metav1.ConditionTrue) + expectedCondition1 := testinghelper.NamedCondition(operatorapiv1.ConditionHubRegistrationDegraded, "GetRegistrationDeploymentFailed", metav1.ConditionTrue) + expectedCondition2 := testinghelper.NamedCondition(operatorapiv1.ConditionHubPlacementDegraded, "UnavailablePlacementPod", metav1.ConditionTrue) testinghelper.AssertOnlyConditions(t, klusterlet, appliedCond, expectedCondition1, expectedCondition2) }, }, @@ -140,8 +140,8 @@ func TestSyncStatus(t *testing.T) { if err != nil { t.Fatal(err) } - expectedCondition1 := testinghelper.NamedCondition(registrationDegraded, "UnavailableRegistrationPod", metav1.ConditionTrue) - expectedCondition2 := testinghelper.NamedCondition(placementDegraded, "PlacementFunctional", metav1.ConditionFalse) + expectedCondition1 := testinghelper.NamedCondition(operatorapiv1.ConditionHubRegistrationDegraded, "UnavailableRegistrationPod", metav1.ConditionTrue) + expectedCondition2 := testinghelper.NamedCondition(operatorapiv1.ConditionHubPlacementDegraded, "PlacementFunctional", metav1.ConditionFalse) testinghelper.AssertOnlyConditions(t, klusterlet, appliedCond, expectedCondition1, expectedCondition2) }, }, @@ -158,8 +158,8 @@ func TestSyncStatus(t *testing.T) { if err != nil { t.Fatal(err) } - expectedCondition1 := testinghelper.NamedCondition(registrationDegraded, "RegistrationFunctional", metav1.ConditionFalse) - expectedCondition2 := testinghelper.NamedCondition(placementDegraded, "GetPlacementDeploymentFailed", metav1.ConditionTrue) + expectedCondition1 := 
testinghelper.NamedCondition(operatorapiv1.ConditionHubRegistrationDegraded, "RegistrationFunctional", metav1.ConditionFalse) + expectedCondition2 := testinghelper.NamedCondition(operatorapiv1.ConditionHubPlacementDegraded, "GetPlacementDeploymentFailed", metav1.ConditionTrue) testinghelper.AssertOnlyConditions(t, klusterlet, appliedCond, expectedCondition1, expectedCondition2) }, }, diff --git a/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go index 15eb6e7b6..b71027cd6 100644 --- a/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go +++ b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller.go @@ -15,7 +15,6 @@ import ( ) const ( - imagePullSecret = "open-cluster-management-image-pull-credentials" addonInstallNamespaceLabelKey = "addon.open-cluster-management.io/namespace" ) @@ -76,9 +75,9 @@ func (c *addonPullImageSecretController) sync(ctx context.Context, controllerCon c.kubeClient.CoreV1(), c.recorder, c.operatorNamespace, - imagePullSecret, + helpers.ImagePullSecret, namespace, - imagePullSecret, + helpers.ImagePullSecret, []metav1.OwnerReference{}, ) if err != nil { diff --git a/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go index 9d2a7769a..71b84e105 100644 --- a/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/addonsecretcontroller/controller_test.go @@ -13,6 +13,7 @@ import ( kubefake "k8s.io/client-go/kubernetes/fake" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" + "open-cluster-management.io/ocm/pkg/operator/helpers" ) func TestSync(t *testing.T) { @@ -53,7 +54,7 @@ func TestSync(t *testing.T) { objects: []runtime.Object{ &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: imagePullSecret, + Name: helpers.ImagePullSecret, Namespace: "open-cluster-management", }, Data: map[string][]byte{ @@ -71,7 +72,7 @@ func TestSync(t *testing.T) { }, }, verify: func(t *testing.T, client *kubefake.Clientset) { - secret, err := client.CoreV1().Secrets("ns1").Get(context.TODO(), imagePullSecret, metav1.GetOptions{}) + secret, err := client.CoreV1().Secrets("ns1").Get(context.TODO(), helpers.ImagePullSecret, metav1.GetOptions{}) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go index 7a29b4529..506c65206 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller.go @@ -267,7 +267,7 @@ func readyToAddHostedFinalizer(klusterlet *operatorapiv1.Klusterlet, mode operat return false } - return meta.IsStatusConditionTrue(klusterlet.Status.Conditions, klusterletReadyToApply) + return meta.IsStatusConditionTrue(klusterlet.Status.Conditions, operatorapiv1.ConditionReadyToApply) } func hasFinalizer(klusterlet *operatorapiv1.Klusterlet, finalizer string) bool { diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go 
b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go index 7f6c1f894..966723fd6 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_cleanup_controller_test.go @@ -12,6 +12,7 @@ import ( clienttesting "k8s.io/client-go/testing" "k8s.io/klog/v2" + operatorapiv1 "open-cluster-management.io/api/operator/v1" workv1 "open-cluster-management.io/api/work/v1" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" @@ -73,7 +74,7 @@ func TestSyncDelete(t *testing.T) { func TestSyncDeleteHosted(t *testing.T) { klusterlet := newKlusterletHosted("klusterlet", "testns", "cluster1") meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", Message: "Klusterlet is ready to apply", }) now := metav1.Now() @@ -149,7 +150,7 @@ func TestSyncDeleteHostedDeleteAgentNamespace(t *testing.T) { newKlusterletHosted("klusterlet", "testns", "cluster1"), klusterletHostedFinalizer) meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: metav1.ConditionFalse, Reason: "KlusterletPrepareFailed", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionFalse, Reason: "KlusterletPrepareFailed", Message: fmt.Sprintf("Failed to build managed cluster clients: %v", "namespaces \"klusterlet\" not found"), }) now := metav1.Now() @@ -209,7 +210,7 @@ func TestSyncAddHostedFinalizerWhenKubeconfigReady(t *testing.T) { } meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", Message: "Klusterlet is ready to apply", }) if err := c.operatorStore.Update(klusterlet); err != nil { diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go index 2aa67d4c7..597639133 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller.go @@ -39,11 +39,6 @@ const ( // klusterletHostedFinalizer is used to clean up resources on the managed/hosted cluster in Hosted mode klusterletHostedFinalizer = "operator.open-cluster-management.io/klusterlet-hosted-cleanup" klusterletFinalizer = "operator.open-cluster-management.io/klusterlet-cleanup" - imagePullSecret = "open-cluster-management-image-pull-credentials" - klusterletApplied = "Applied" - klusterletReadyToApply = "ReadyToApply" - hubConnectionDegraded = "HubConnectionDegraded" - hubKubeConfigSecretMissing = "HubKubeConfigSecretMissing" // #nosec G101 managedResourcesEvictionTimestampAnno = "operator.open-cluster-management.io/managed-resources-eviction-timestamp" ) @@ -220,12 +215,12 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto if helpers.IsHosted(config.InstallMode) { if err != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: 
metav1.ConditionFalse, Reason: "KlusterletPrepareFailed", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletPrepareFailed, Message: fmt.Sprintf("Failed to build managed cluster clients: %v", err), }) } else { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionTrue, Reason: operatorapiv1.ReasonKlusterletPrepared, Message: "Klusterlet is ready to apply", }) } @@ -334,7 +329,7 @@ func (n *klusterletController) sync(ctx context.Context, controllerContext facto if len(errs) == 0 { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionTrue, Reason: "KlusterletApplied", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionTrue, Reason: operatorapiv1.ReasonKlusterletApplied, Message: "Klusterlet Component Applied"}) } else { // When appliedCondition is false, we should not update related resources and resource generations @@ -395,15 +390,15 @@ func syncPullSecret(ctx context.Context, sourceClient, targetClient kubernetes.I targetClient.CoreV1(), recorder, operatorNamespace, - imagePullSecret, + helpers.ImagePullSecret, namespace, - imagePullSecret, + helpers.ImagePullSecret, []metav1.OwnerReference{}, ) if err != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletApplyFailed, Message: fmt.Sprintf("Failed to sync image pull secret to namespace %q: %v", namespace, err)}) return err } @@ -414,7 +409,7 @@ func ensureNamespace(ctx context.Context, kubeClient kubernetes.Interface, klust namespace string, recorder events.Recorder) error { if err := ensureAgentNamespace(ctx, kubeClient, namespace, recorder); err != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletApplyFailed, Message: fmt.Sprintf("Failed to ensure namespace %q: %v", namespace, err)}) return err } diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go index 9dec056fd..33f23cfc3 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go @@ -551,7 +551,7 @@ func TestSyncDeploy(t *testing.T) { } testinghelper.AssertOnlyConditions( t, klusterlet, - testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletApplied, "KlusterletApplied", metav1.ConditionTrue), testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue), ) } @@ -612,7 +612,7 @@ func TestSyncDeploySingleton(t *testing.T) { } testinghelper.AssertOnlyConditions( t, klusterlet, - testinghelper.NamedCondition(klusterletApplied, 
"KlusterletApplied", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletApplied, "KlusterletApplied", metav1.ConditionTrue), testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue), ) } @@ -621,7 +621,7 @@ func TestSyncDeploySingleton(t *testing.T) { func TestSyncDeployHosted(t *testing.T) { klusterlet := newKlusterletHosted("klusterlet", "testns", "cluster1") meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionTrue, Reason: "KlusterletPrepared", Message: "Klusterlet is ready to apply", }) agentNamespace := helpers.AgentNamespace(klusterlet) @@ -631,7 +631,7 @@ func TestSyncDeployHosted(t *testing.T) { // externalManagedSecret := newSecret(helpers.ExternalManagedKubeConfig, agentNamespace) // externalManagedSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig") namespace := newNamespace(agentNamespace) - pullSecret := newSecret(imagePullSecret, "open-cluster-management") + pullSecret := newSecret(helpers.ImagePullSecret, "open-cluster-management") syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet") controller := newTestControllerHosted(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, @@ -707,8 +707,8 @@ func TestSyncDeployHosted(t *testing.T) { klog.Infof("operator actions, verb:%v \t resource:%v \t namespace:%v", action.GetVerb(), action.GetResource(), action.GetNamespace()) } - conditionReady := testinghelper.NamedCondition(klusterletReadyToApply, "KlusterletPrepared", metav1.ConditionTrue) - conditionApplied := testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue) + conditionReady := testinghelper.NamedCondition(operatorapiv1.ConditionReadyToApply, "KlusterletPrepared", metav1.ConditionTrue) + conditionApplied := testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletApplied, "KlusterletApplied", metav1.ConditionTrue) conditionFeaturesValid := testinghelper.NamedCondition( helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue) testingcommon.AssertActions(t, operatorAction, "patch") @@ -726,7 +726,7 @@ func TestSyncDeployHosted(t *testing.T) { func TestSyncDeployHostedCreateAgentNamespace(t *testing.T) { klusterlet := newKlusterletHosted("klusterlet", "testns", "cluster1") meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletReadyToApply, Status: metav1.ConditionFalse, Reason: "KlusterletPrepareFailed", + Type: operatorapiv1.ConditionReadyToApply, Status: metav1.ConditionFalse, Reason: "KlusterletPrepareFailed", Message: "Failed to build managed cluster clients: secrets \"external-managed-kubeconfig\" not found", }) syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet") @@ -815,7 +815,7 @@ func TestReplica(t *testing.T) { klusterlet = newKlusterlet("klusterlet", "testns", "cluster1") klusterlet.Status.Conditions = []metav1.Condition{ { - Type: hubConnectionDegraded, + Type: operatorapiv1.ConditionHubConnectionDegraded, Status: metav1.ConditionFalse, }, } @@ -952,7 +952,7 @@ func TestSyncWithPullSecret(t *testing.T) { hubKubeConfigSecret := newSecret(helpers.HubKubeConfig, "testns") hubKubeConfigSecret.Data["kubeconfig"] = []byte("dummuykubeconnfig") namespace := newNamespace("testns") - pullSecret := newSecret(imagePullSecret, "open-cluster-management") + 
pullSecret := newSecret(helpers.ImagePullSecret, "open-cluster-management") syncContext := testingcommon.NewFakeSyncContext(t, "klusterlet") controller := newTestController(t, klusterlet, syncContext.Recorder(), nil, bootStrapSecret, hubKubeConfigSecret, namespace, pullSecret) @@ -970,7 +970,7 @@ func TestSyncWithPullSecret(t *testing.T) { } } - if createdSecret == nil || createdSecret.Name != imagePullSecret { + if createdSecret == nil || createdSecret.Name != helpers.ImagePullSecret { t.Errorf("Failed to sync pull secret") } } @@ -1024,7 +1024,7 @@ func TestDeployOnKube111(t *testing.T) { testinghelper.AssertOnlyConditions( t, updatedKlusterlet, - testinghelper.NamedCondition(klusterletApplied, "KlusterletApplied", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletApplied, "KlusterletApplied", metav1.ConditionTrue), testinghelper.NamedCondition(helpers.FeatureGatesTypeValid, helpers.FeatureGatesReasonAllValid, metav1.ConditionTrue), ) diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go index b963f0f81..27417254d 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_crd_reconcile.go @@ -85,7 +85,7 @@ func (r *crdReconcile) reconcile(ctx context.Context, klusterlet *operatorapiv1. if applyErr != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "CRDApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletCRDApplyFailed, Message: applyErr.Error(), }) return klusterlet, reconcileStop, applyErr diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go index 733cc4a9b..5ad9ca4e8 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_managed_reconcile.go @@ -131,7 +131,7 @@ func (r *managedReconcile) reconcile(ctx context.Context, klusterlet *operatorap if len(errs) > 0 { applyErrors := utilerrors.NewAggregate(errs) meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "ManagedClusterResourceApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonManagedClusterResourceApplyFailed, Message: applyErrors.Error(), }) return klusterlet, reconcileStop, applyErrors diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go index e11b426bb..91c8aaab5 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_management_recocile.go @@ -87,7 +87,7 @@ func (r *managementReconcile) reconcile(ctx context.Context, klusterlet *operato if len(errs) > 0 { applyErrors := 
utilerrors.NewAggregate(errs) meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "ManagementClusterResourceApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonManagementClusterResourceApplyFailed, Message: applyErrors.Error(), }) return klusterlet, reconcileStop, applyErrors diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go index 31b494a40..588b27e29 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_runtime_reconcile.go @@ -96,11 +96,11 @@ func (r *runtimeReconcile) installAgent(ctx context.Context, klusterlet *operato // * The work agent should not be scaled to 0 in degraded condition with other reasons, // because we still need work agent running even though the hub kubconfig is missing some certain permission. // It can ensure work agent to clean up the resources defined in manifestworks when cluster is detaching from the hub. - hubConnectionDegradedCondition := meta.FindStatusCondition(klusterlet.Status.Conditions, hubConnectionDegraded) + hubConnectionDegradedCondition := meta.FindStatusCondition(klusterlet.Status.Conditions, operatorapiv1.ConditionHubConnectionDegraded) if hubConnectionDegradedCondition == nil { workConfig.Replica = 0 } else if hubConnectionDegradedCondition.Status == metav1.ConditionTrue && - strings.Contains(hubConnectionDegradedCondition.Reason, hubKubeConfigSecretMissing) { + strings.Contains(hubConnectionDegradedCondition.Reason, operatorapiv1.ReasonHubKubeConfigSecretMissing) { workConfig.Replica = 0 } @@ -203,7 +203,7 @@ func (r *runtimeReconcile) createManagedClusterKubeconfig( r.managedClusterClients.kubeconfig, r.kubeClient.CoreV1(), tokenGetter, recorder) if err != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletApplyFailed, Message: fmt.Sprintf("Failed to create managed kubeconfig secret %s with error %v", secretName, err), }) } @@ -214,7 +214,7 @@ func (r *runtimeReconcile) getClusterNameFromHubKubeConfigSecret(ctx context.Con hubSecret, err := r.kubeClient.CoreV1().Secrets(namespace).Get(ctx, helpers.HubKubeConfig, metav1.GetOptions{}) if err != nil { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletApplyFailed, Message: fmt.Sprintf("Failed to get cluster name from hub kubeconfig secret with error %v", err), }) return "", err @@ -223,7 +223,7 @@ func (r *runtimeReconcile) getClusterNameFromHubKubeConfigSecret(ctx context.Con clusterName := hubSecret.Data["cluster-name"] if len(clusterName) == 0 { meta.SetStatusCondition(&klusterlet.Status.Conditions, metav1.Condition{ - Type: klusterletApplied, Status: metav1.ConditionFalse, Reason: "KlusterletApplyFailed", + Type: operatorapiv1.ConditionKlusterletApplied, Status: 
metav1.ConditionFalse, Reason: operatorapiv1.ReasonKlusterletApplyFailed, Message: fmt.Sprintf("Failed to get cluster name from hub kubeconfig secret with error %v", err), }) return "", fmt.Errorf("the cluster name in the secret is empty") diff --git a/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go index 89f833c27..fb9abe1a7 100644 --- a/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller.go @@ -43,10 +43,6 @@ type klusterletLocker struct { klusterletInChecking map[string]struct{} } -const ( - hubConnectionDegraded = "HubConnectionDegraded" -) - func NewKlusterletSSARController( kubeClient kubernetes.Interface, klusterletClient operatorv1client.KlusterletInterface, @@ -125,7 +121,7 @@ func (c *ssarController) sync(ctx context.Context, controllerContext factory.Syn hubConfigDegradedCondition := checkAgentDegradedCondition( ctx, c.kubeClient, - hubConnectionDegraded, + operatorapiv1.ConditionHubConnectionDegraded, klusterletAgent{ clusterName: klusterlet.Spec.ClusterName, namespace: agentNamespace, @@ -149,7 +145,7 @@ func (c *ssarController) sync(ctx context.Context, controllerContext factory.Syn bootstrapDegradedCondition := checkAgentDegradedCondition( ctx, c.kubeClient, - hubConnectionDegraded, + operatorapiv1.ConditionHubConnectionDegraded, klusterletAgent{ clusterName: klusterlet.Spec.ClusterName, namespace: agentNamespace, @@ -161,7 +157,7 @@ func (c *ssarController) sync(ctx context.Context, controllerContext factory.Syn // The status is always true here since the hub kubeconfig check fails. Need to add the additional // message relating to bootstrap kubeconfig check here. meta.SetStatusCondition(&newKlusterlet.Status.Conditions, metav1.Condition{ - Type: hubConnectionDegraded, + Type: operatorapiv1.ConditionHubConnectionDegraded, Status: metav1.ConditionTrue, ObservedGeneration: klusterlet.Generation, Reason: bootstrapDegradedCondition.Reason + "," + hubConfigDegradedCondition.Reason, @@ -267,7 +263,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, if err != nil { return metav1.Condition{ Status: metav1.ConditionTrue, - Reason: "HubKubeConfigSecretMissing", + Reason: operatorapiv1.ReasonHubKubeConfigSecretMissing, Message: fmt.Sprintf("Failed to get hub kubeconfig secret %q %q: %v", agent.namespace, helpers.HubKubeConfig, err), } } @@ -275,7 +271,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, if hubConfigSecret.Data["kubeconfig"] == nil { return metav1.Condition{ Status: metav1.ConditionTrue, - Reason: "HubKubeConfigMissing", + Reason: operatorapiv1.ReasonHubKubeConfigMissing, Message: fmt.Sprintf("Failed to get kubeconfig from `kubectl get secret -n %q %q -ojsonpath='{.data.kubeconfig}'`. 
"+ "This is set by the klusterlet registration deployment, but the CSR must be approved by the cluster-admin on the hub.", hubConfigSecret.Namespace, hubConfigSecret.Name), @@ -286,7 +282,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, if err != nil { return metav1.Condition{ Status: metav1.ConditionTrue, - Reason: "HubKubeConfigError", + Reason: operatorapiv1.ReasonHubKubeConfigError, Message: fmt.Sprintf("Failed to build hub kube client with hub config secret %q %q: %v", hubConfigSecret.Namespace, hubConfigSecret.Name, err), } @@ -298,7 +294,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, if hubConfigSecret.Data["cluster-name"] == nil { return metav1.Condition{ Status: metav1.ConditionTrue, - Reason: "ClusterNameMissing", + Reason: operatorapiv1.ReasonClusterNameMissing, Message: fmt.Sprintf( "Failed to get cluster name from `kubectl get secret -n %q %q -ojsonpath='{.data.cluster-name}`."+ " This is set by the klusterlet registration deployment.", hubConfigSecret.Namespace, hubConfigSecret.Name), @@ -312,7 +308,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, if err != nil { return metav1.Condition{ Status: metav1.ConditionTrue, - Reason: "HubKubeConfigError", + Reason: operatorapiv1.ReasonHubKubeConfigError, Message: fmt.Sprintf("Failed to create %+v with hub config secret %q/%q to apiserver %s: %v", failedReview, hubConfigSecret.Namespace, hubConfigSecret.Name, host, err), } @@ -320,7 +316,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, if !allowed { return metav1.Condition{ Status: metav1.ConditionTrue, - Reason: "HubKubeConfigUnauthorized", + Reason: operatorapiv1.ReasonHubKubeConfigUnauthorized, Message: fmt.Sprintf("Operation for resource %+v is not allowed with hub config secret %q/%q to apiserver %s", failedReview.Spec.ResourceAttributes, hubConfigSecret.Namespace, hubConfigSecret.Name, host), } @@ -328,7 +324,7 @@ func checkHubConfigSecret(ctx context.Context, kubeClient kubernetes.Interface, return metav1.Condition{ Status: metav1.ConditionFalse, - Reason: "HubConnectionFunctional", + Reason: operatorapiv1.ReasonHubConnectionFunctional, Message: fmt.Sprintf("Hub kubeconfig secret %s/%s to apiserver %s is working", agent.namespace, helpers.HubKubeConfig, host), } diff --git a/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go index 21e742a07..d240a6429 100644 --- a/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/ssarcontroller/klusterlet_ssar_controller_test.go @@ -184,7 +184,7 @@ func TestSync(t *testing.T) { allowToOperateManagedClusterStatus: false, allowToOperateManifestWorks: false, expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(hubConnectionDegraded, "BootstrapSecretMissing,HubKubeConfigUnauthorized", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionHubConnectionDegraded, "BootstrapSecretMissing,HubKubeConfigUnauthorized", metav1.ConditionTrue), }, }, { @@ -197,7 +197,7 @@ func TestSync(t *testing.T) { allowToOperateManagedClusterStatus: true, allowToOperateManifestWorks: true, expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(hubConnectionDegraded, "BootstrapSecretFunctional,HubKubeConfigSecretMissing", 
metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionHubConnectionDegraded, "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue), }, }, { @@ -211,7 +211,7 @@ func TestSync(t *testing.T) { allowToOperateManifestWorks: false, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(hubConnectionDegraded, "BootstrapSecretError,HubKubeConfigUnauthorized", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionHubConnectionDegraded, "BootstrapSecretError,HubKubeConfigUnauthorized", metav1.ConditionTrue), }, }, { @@ -225,7 +225,7 @@ func TestSync(t *testing.T) { allowToOperateManifestWorks: true, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(hubConnectionDegraded, "BootstrapSecretFunctional,HubKubeConfigError", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionHubConnectionDegraded, "BootstrapSecretFunctional,HubKubeConfigError", metav1.ConditionTrue), }, }, { @@ -239,7 +239,7 @@ func TestSync(t *testing.T) { allowToOperateManifestWorks: false, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(hubConnectionDegraded, "BootstrapSecretUnauthorized,HubKubeConfigUnauthorized", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionHubConnectionDegraded, "BootstrapSecretUnauthorized,HubKubeConfigUnauthorized", metav1.ConditionTrue), }, }, { @@ -253,7 +253,7 @@ func TestSync(t *testing.T) { allowToOperateManifestWorks: true, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(hubConnectionDegraded, "HubConnectionFunctional", metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionHubConnectionDegraded, "HubConnectionFunctional", metav1.ConditionFalse), }, }, } diff --git a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go index dba2450ea..31bce90fe 100644 --- a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go +++ b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller.go @@ -32,13 +32,6 @@ type klusterletStatusController struct { klusterletLister operatorlister.KlusterletLister } -const ( - klusterletRegistrationDesiredDegraded = "RegistrationDesiredDegraded" - klusterletWorkDesiredDegraded = "WorkDesiredDegraded" - klusterletAvailable = "Available" - klusterletApplied = "Applied" -) - // NewKlusterletStatusController returns a klusterletStatusController func NewKlusterletStatusController( kubeClient kubernetes.Interface, @@ -75,7 +68,7 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext } // Do nothing when the klusterlet is not applied yet - if meta.FindStatusCondition(klusterlet.Status.Conditions, klusterletApplied) == nil { + if meta.FindStatusCondition(klusterlet.Status.Conditions, operatorapiv1.ConditionKlusterletApplied) == nil { return nil } @@ -107,11 +100,11 @@ func (k *klusterletStatusController) sync(ctx context.Context, controllerContext meta.SetStatusCondition(&newKlusterlet.Status.Conditions, availableCondition) registrationDesiredCondition := 
checkAgentDeploymentDesired(ctx, - k.kubeClient, agentNamespace, registrationDeploymentName, klusterletRegistrationDesiredDegraded) + k.kubeClient, agentNamespace, registrationDeploymentName, operatorapiv1.ConditionRegistrationDesiredDegraded) registrationDesiredCondition.ObservedGeneration = klusterlet.Generation meta.SetStatusCondition(&newKlusterlet.Status.Conditions, registrationDesiredCondition) - workDesiredCondition := checkAgentDeploymentDesired(ctx, k.kubeClient, agentNamespace, workDeploymentName, klusterletWorkDesiredDegraded) + workDesiredCondition := checkAgentDeploymentDesired(ctx, k.kubeClient, agentNamespace, workDeploymentName, operatorapiv1.ConditionWorkDesiredDegraded) workDesiredCondition.ObservedGeneration = klusterlet.Generation meta.SetStatusCondition(&newKlusterlet.Status.Conditions, workDesiredCondition) @@ -131,7 +124,7 @@ func checkAgentDeploymentDesired(ctx context.Context, kubeClient kubernetes.Inte return metav1.Condition{ Type: conditionType, Status: metav1.ConditionTrue, - Reason: "GetDeploymentFailed", + Reason: operatorapiv1.ReasonKlusterletGetDeploymentFailed, Message: fmt.Sprintf("Failed to get deployment %q %q: %v", namespace, deploymentName, err), } } @@ -139,7 +132,7 @@ func checkAgentDeploymentDesired(ctx context.Context, kubeClient kubernetes.Inte return metav1.Condition{ Type: conditionType, Status: metav1.ConditionTrue, - Reason: "UnavailablePods", + Reason: operatorapiv1.ReasonKlusterletUnavailablePods, Message: fmt.Sprintf("%v of requested instances are unavailable of deployment %q %q", unavailablePod, namespace, deploymentName), } @@ -147,7 +140,7 @@ func checkAgentDeploymentDesired(ctx context.Context, kubeClient kubernetes.Inte return metav1.Condition{ Type: conditionType, Status: metav1.ConditionFalse, - Reason: "DeploymentsFunctional", + Reason: operatorapiv1.ReasonKlusterletDeploymentsFunctional, Message: fmt.Sprintf("deployments replicas are desired: %d", *deployment.Spec.Replicas), } } @@ -159,17 +152,17 @@ func checkAgentsDeploymentAvailable(ctx context.Context, kubeClient kubernetes.I deployment, err := kubeClient.AppsV1().Deployments(agent.namespace).Get(ctx, agent.deploymentName, metav1.GetOptions{}) if err != nil { return metav1.Condition{ - Type: klusterletAvailable, + Type: operatorapiv1.ConditionKlusterletAvailable, Status: metav1.ConditionFalse, - Reason: "GetDeploymentFailed", + Reason: operatorapiv1.ReasonKlusterletGetDeploymentFailed, Message: fmt.Sprintf("Failed to get deployment %q %q: %v", agent.namespace, agent.deploymentName, err), } } if deployment.Status.AvailableReplicas <= 0 { return metav1.Condition{ - Type: klusterletAvailable, + Type: operatorapiv1.ConditionKlusterletAvailable, Status: metav1.ConditionFalse, - Reason: "NoAvailablePods", + Reason: operatorapiv1.ReasonKlusterletNoAvailablePods, Message: fmt.Sprintf("%v of requested instances are available of deployment %q %q", deployment.Status.AvailableReplicas, agent.namespace, agent.deploymentName), } @@ -178,9 +171,9 @@ func checkAgentsDeploymentAvailable(ctx context.Context, kubeClient kubernetes.I } return metav1.Condition{ - Type: klusterletAvailable, + Type: operatorapiv1.ConditionKlusterletAvailable, Status: metav1.ConditionTrue, - Reason: "klusterletAvailable", + Reason: operatorapiv1.ReasonKlusterletAvailable, Message: fmt.Sprintf("deployments are ready: %s", strings.Join(availableMessages, ",")), } } diff --git a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go 
b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go index 751b077f9..b2b21457b 100644 --- a/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/statuscontroller/klusterlet_status_controller_test.go @@ -43,7 +43,7 @@ func newKlusterlet(name, namespace, clustername string) *operatorapiv1.Klusterle Status: operatorapiv1.KlusterletStatus{ Conditions: []metav1.Condition{ { - Type: klusterletApplied, + Type: operatorapiv1.ConditionKlusterletApplied, Status: metav1.ConditionTrue, }, }, @@ -108,9 +108,9 @@ func TestSync(t *testing.T) { klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(klusterletAvailable, "NoAvailablePods", metav1.ConditionFalse), - testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), - testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletAvailable, "NoAvailablePods", metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), }, }, { @@ -121,9 +121,9 @@ func TestSync(t *testing.T) { }, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(klusterletAvailable, "NoAvailablePods", metav1.ConditionFalse), - testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), - testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletAvailable, "NoAvailablePods", metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), }, }, { @@ -134,9 +134,9 @@ func TestSync(t *testing.T) { }, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(klusterletAvailable, "NoAvailablePods", metav1.ConditionFalse), - testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), - testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletAvailable, "NoAvailablePods", metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), }, }, { @@ -147,9 +147,9 @@ func TestSync(t *testing.T) { }, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(klusterletAvailable, "klusterletAvailable", metav1.ConditionTrue), - testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), - 
testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "UnavailablePods", metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletAvailable, operatorapiv1.ReasonKlusterletAvailable, metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionRegistrationDesiredDegraded, operatorapiv1.ReasonKlusterletUnavailablePods, metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionWorkDesiredDegraded, operatorapiv1.ReasonKlusterletUnavailablePods, metav1.ConditionTrue), }, }, { @@ -160,9 +160,12 @@ func TestSync(t *testing.T) { }, klusterlet: newKlusterlet("testklusterlet", "test", "cluster1"), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(klusterletAvailable, "klusterletAvailable", metav1.ConditionTrue), - testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse), - testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletAvailable, + operatorapiv1.ReasonKlusterletAvailable, metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionRegistrationDesiredDegraded, + operatorapiv1.ReasonKlusterletDeploymentsFunctional, metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionWorkDesiredDegraded, + operatorapiv1.ReasonKlusterletDeploymentsFunctional, metav1.ConditionFalse), }, }, { @@ -176,9 +179,12 @@ func TestSync(t *testing.T) { return k }(), expectedConditions: []metav1.Condition{ - testinghelper.NamedCondition(klusterletAvailable, "klusterletAvailable", metav1.ConditionTrue), - testinghelper.NamedCondition(klusterletRegistrationDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse), - testinghelper.NamedCondition(klusterletWorkDesiredDegraded, "DeploymentsFunctional", metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletAvailable, + operatorapiv1.ReasonKlusterletAvailable, metav1.ConditionTrue), + testinghelper.NamedCondition(operatorapiv1.ConditionRegistrationDesiredDegraded, + operatorapiv1.ReasonKlusterletDeploymentsFunctional, metav1.ConditionFalse), + testinghelper.NamedCondition(operatorapiv1.ConditionWorkDesiredDegraded, + operatorapiv1.ReasonKlusterletDeploymentsFunctional, metav1.ConditionFalse), }, }, } @@ -202,7 +208,8 @@ func TestSync(t *testing.T) { t.Fatal(err) } expectedConditions := c.expectedConditions - meta.SetStatusCondition(&expectedConditions, testinghelper.NamedCondition(klusterletApplied, "", metav1.ConditionTrue)) + meta.SetStatusCondition(&expectedConditions, + testinghelper.NamedCondition(operatorapiv1.ConditionKlusterletApplied, "", metav1.ConditionTrue)) c.expectedConditions = expectedConditions testinghelper.AssertOnlyConditions(t, klusterlet, c.expectedConditions...) 
}) diff --git a/pkg/operator/operators/klusterlet/options.go b/pkg/operator/operators/klusterlet/options.go index 58fd960f8..aaf3e257f 100644 --- a/pkg/operator/operators/klusterlet/options.go +++ b/pkg/operator/operators/klusterlet/options.go @@ -2,7 +2,6 @@ package klusterlet import ( "context" - "os" "time" "github.com/openshift/library-go/pkg/controller/controllercmd" @@ -25,9 +24,6 @@ import ( "open-cluster-management.io/ocm/pkg/operator/operators/klusterlet/controllers/statuscontroller" ) -// defaultSpokeComponentNamespace is the default namespace in which the operator is deployed -const defaultComponentNamespace = "open-cluster-management" - type Options struct { SkipPlaceholderHubSecret bool } @@ -90,13 +86,6 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext * return err } - // Read component namespace - operatorNamespace := defaultComponentNamespace - nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err == nil { - operatorNamespace = string(nsBytes) - } - klusterletController := klusterletcontroller.NewKlusterletController( kubeClient, apiExtensionClient, @@ -106,7 +95,7 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext * deploymentInformer.Apps().V1().Deployments(), workClient.WorkV1().AppliedManifestWorks(), kubeVersion, - operatorNamespace, + helpers.GetOperatorNamespace(), controllerContext.EventRecorder, o.SkipPlaceholderHubSecret) @@ -119,7 +108,7 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext * deploymentInformer.Apps().V1().Deployments(), workClient.WorkV1().AppliedManifestWorks(), kubeVersion, - operatorNamespace, + helpers.GetOperatorNamespace(), controllerContext.EventRecorder) ssarController := ssarcontroller.NewKlusterletSSARController( @@ -140,7 +129,7 @@ func (o *Options) RunKlusterletOperator(ctx context.Context, controllerContext * addonController := addonsecretcontroller.NewAddonPullImageSecretController( kubeClient, - operatorNamespace, + helpers.GetOperatorNamespace(), kubeInformer.Core().V1().Namespaces(), controllerContext.EventRecorder, ) diff --git a/test/integration/operator/klusterlet_test.go b/test/integration/operator/klusterlet_test.go index 212c79c8f..0fde5ad4a 100644 --- a/test/integration/operator/klusterlet_test.go +++ b/test/integration/operator/klusterlet_test.go @@ -843,7 +843,7 @@ var _ = ginkgo.Describe("Klusterlet", func() { _, err = kubeClient.AppsV1().Deployments(klusterletNamespace).UpdateStatus(context.Background(), workDeployment, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "Available", "klusterletAvailable", metav1.ConditionTrue) + util.AssertKlusterletCondition(klusterlet.Name, operatorClient, "Available", "KlusterletAvailable", metav1.ConditionTrue) }) }) diff --git a/vendor/modules.txt b/vendor/modules.txt index 2d8920d9a..6d1b14233 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1507,7 +1507,7 @@ open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.13.0 +# open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c ## explicit; go 1.21 open-cluster-management.io/api/addon/v1alpha1 
open-cluster-management.io/api/client/addon/clientset/versioned diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index 8cc24234b..8abdb8e37 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -323,6 +323,8 @@ spec: type: string type: object workConfiguration: + default: + workDriver: kube description: WorkConfiguration contains the configuration of work properties: featureGates: @@ -356,6 +358,22 @@ spec: - feature type: object type: array + workDriver: + default: kube + description: "WorkDriver represents the type of work driver. Possible + values are \"kube\", \"mqtt\", or \"grpc\". If not provided, + the default value is \"kube\". If set to non-\"kube\" drivers, + the klusterlet needs to use the same driver, and the driver configuration + must be provided in a secret named \"work-driver-config\" in + the namespace where the cluster manager is running, adhering + to the following structure: config.yaml: | + \n For detailed driver configuration, please refer to the sdk-go + documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers" + enum: + - kube + - mqtt + - grpc + type: string type: object workImagePullSpec: default: quay.io/open-cluster-management/work diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go b/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go index 33b0804f9..373f87f10 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go +++ b/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go @@ -65,6 +65,7 @@ type ClusterManagerSpec struct { // WorkConfiguration contains the configuration of work // +optional + // +kubebuilder:default={workDriver: kube} WorkConfiguration *WorkConfiguration `json:"workConfiguration,omitempty"` // AddOnManagerConfiguration contains the configuration of addon manager @@ -119,8 +120,35 @@ type WorkConfiguration struct { // he can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false. // +optional FeatureGates []FeatureGate `json:"featureGates,omitempty"` + + // WorkDriver represents the type of work driver. Possible values are "kube", "mqtt", or "grpc". + // If not provided, the default value is "kube". + // If set to non-"kube" drivers, the klusterlet needs to use the same driver, + // and the driver configuration must be provided in a secret named "work-driver-config" + // in the namespace where the cluster manager is running, adhering to the following structure: + // config.yaml: | + // + // + // For detailed driver configuration, please refer to the sdk-go documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers + // + // +optional + // +kubebuilder:default:=kube + // +kubebuilder:validation:Enum=kube;mqtt;grpc + WorkDriver WorkDriverType `json:"workDriver,omitempty"` } + +// WorkDriverType represents the type of work driver. +type WorkDriverType string + +const ( + // WorkDriverTypeKube is the work driver type for kube.
+ WorkDriverTypeKube WorkDriverType = "kube" + // WorkDriverTypeMqtt is the work driver type for mqtt. + WorkDriverTypeMqtt WorkDriverType = "mqtt" + // WorkDriverTypeGrpc is the work driver type for grpc. + WorkDriverTypeGrpc WorkDriverType = "grpc" +) + type AddOnManagerConfiguration struct { // FeatureGates represents the list of feature gates for addon manager // If it is set empty, default feature gates will be used. @@ -311,3 +339,54 @@ type ClusterManagerList struct { // Items is a list of deployment configurations for registration and work distribution controllers. Items []ClusterManager `json:"items"` } + +const ( + // The types of ClusterManager condition status. + // ConditionClusterManagerApplied is the ClusterManager condition status which means all components have been applied on the hub. + ConditionClusterManagerApplied = "Applied" + // ConditionHubRegistrationDegraded is the ClusterManager condition status which means the registration is not ready to serve on the hub. + ConditionHubRegistrationDegraded = "HubRegistrationDegraded" + // ConditionHubPlacementDegraded is the ClusterManager condition status which means the placement is not ready to serve on the hub. + ConditionHubPlacementDegraded = "HubPlacementDegraded" + // ConditionProgressing is the ClusterManager condition status which means the ClusterManager is in the upgrading phase. + ConditionProgressing = "Progressing" + // ConditionMigrationSucceeded is the ClusterManager condition status which means the API migration has succeeded on the hub. + ConditionMigrationSucceeded = "MigrationSucceeded" + + // ReasonClusterManagerApplied is the reason of the ConditionClusterManagerApplied condition to show all resources are applied. + ReasonClusterManagerApplied = "ClusterManagerApplied" + // ReasonRuntimeResourceApplyFailed is the reason of the ConditionClusterManagerApplied condition to show it failed to apply deployments. + ReasonRuntimeResourceApplyFailed = "RuntimeResourceApplyFailed" + // ReasonServiceAccountSyncFailed is the reason of the ConditionClusterManagerApplied condition to show it failed to apply serviceAccounts. + ReasonServiceAccountSyncFailed = "ServiceAccountSyncFailed" + // ReasonClusterManagerCRDApplyFailed is the reason of the ConditionClusterManagerApplied condition to show it failed to apply CRDs. + ReasonClusterManagerCRDApplyFailed = "CRDApplyFailed" + // ReasonWebhookApplyFailed is the reason of the ConditionClusterManagerApplied condition to show it failed to apply webhooks. + ReasonWebhookApplyFailed = "WebhookApplyFailed" + + // ReasonDeploymentRolling is the reason of the ConditionProgressing condition to show the deployed deployments are rolling. + ReasonDeploymentRolling = "ClusterManagerDeploymentRolling" + // ReasonUpToDate is the reason of the ConditionProgressing condition to show the deployed deployments are up-to-date. + ReasonUpToDate = "ClusterManagerUpToDate" + + // ReasonStorageVersionMigrationFailed is the reason of the ConditionMigrationSucceeded condition to show the API storageVersion migration failed. + ReasonStorageVersionMigrationFailed = "StorageVersionMigrationFailed" + // ReasonStorageVersionMigrationProcessing is the reason of the ConditionMigrationSucceeded condition to show the API storageVersion migration has not completed. + ReasonStorageVersionMigrationProcessing = "StorageVersionMigrationProcessing" + // ReasonStorageVersionMigrationSucceed is the reason of the ConditionMigrationSucceeded condition to show the API storageVersion migration succeeded.
+ ReasonStorageVersionMigrationSucceed = "StorageVersionMigrationSucceed" + + // ReasonGetRegistrationDeploymentFailed is the reason of the ConditionHubRegistrationDegraded condition to show it failed to get the registration deployment. + ReasonGetRegistrationDeploymentFailed = "GetRegistrationDeploymentFailed" + // ReasonUnavailableRegistrationPod is the reason of the ConditionHubRegistrationDegraded condition to show the registration pods are unavailable. + ReasonUnavailableRegistrationPod = "UnavailableRegistrationPod" + // ReasonRegistrationFunctional is the reason of the ConditionHubRegistrationDegraded condition to show registration is functional. + ReasonRegistrationFunctional = "RegistrationFunctional" + + // ReasonGetPlacementDeploymentFailed is the reason of the ConditionHubPlacementDegraded condition to show it failed to get the placement deployment. + ReasonGetPlacementDeploymentFailed = "GetPlacementDeploymentFailed" + // ReasonUnavailablePlacementPod is the reason of the ConditionHubPlacementDegraded condition to show the placement pods are unavailable. + ReasonUnavailablePlacementPod = "UnavailablePlacementPod" + // ReasonPlacementFunctional is the reason of the ConditionHubPlacementDegraded condition to show placement is functional. + ReasonPlacementFunctional = "PlacementFunctional" +) diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go index 96c51fdb6..ea97392d5 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go +++ b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go @@ -244,3 +244,66 @@ type KlusterletList struct { // Items is a list of Klusterlet agents. Items []Klusterlet `json:"items"` } + +const ( + // The types of klusterlet condition status. + // ConditionKlusterletApplied is the klusterlet condition status which means all components have been applied on the managed cluster. + ConditionKlusterletApplied = "Applied" + // ConditionReadyToApply is a klusterlet condition status which means it is ready to apply the resources on the managed cluster. + ConditionReadyToApply = "ReadyToApply" + // ConditionKlusterletAvailable is the klusterlet condition status which means all components are available and ready to serve. + ConditionKlusterletAvailable = "Available" + // ConditionHubConnectionDegraded is the klusterlet condition status which means the agent on the managed cluster cannot access the hub cluster. + ConditionHubConnectionDegraded = "HubConnectionDegraded" + // ConditionRegistrationDesiredDegraded is the klusterlet condition status which means the registration agent on the managed cluster is not ready to serve. + ConditionRegistrationDesiredDegraded = "RegistrationDesiredDegraded" + // ConditionWorkDesiredDegraded is the klusterlet condition status which means the work agent on the managed cluster is not ready to serve. + ConditionWorkDesiredDegraded = "WorkDesiredDegraded" + + // ReasonKlusterletApplied is the reason of ConditionKlusterletApplied condition to show resources are applied. + ReasonKlusterletApplied = "KlusterletApplied" + // ReasonKlusterletApplyFailed is the reason of ConditionKlusterletApplied condition to show it failed to apply resources. + ReasonKlusterletApplyFailed = "KlusterletApplyFailed" + // ReasonKlusterletCRDApplyFailed is the reason of ConditionKlusterletApplied condition to show it failed to apply CRDs.
+ ReasonKlusterletCRDApplyFailed = "CRDApplyFailed" + // ReasonManagedClusterResourceApplyFailed is the reason of ConditionKlusterletApplied condition to show it failed to apply resources on the managed cluster. + ReasonManagedClusterResourceApplyFailed = "ManagedClusterResourceApplyFailed" + // ReasonManagementClusterResourceApplyFailed is the reason of ConditionKlusterletApplied condition to show it failed to apply resources on the management cluster. + ReasonManagementClusterResourceApplyFailed = "ManagementClusterResourceApplyFailed" + + // ReasonKlusterletPrepareFailed is the reason of ConditionReadyToApply condition to show it failed to get the kubeConfig + // of managed cluster from the external-managed-kubeconfig secret in the hosted mode. + ReasonKlusterletPrepareFailed = "KlusterletPrepareFailed" + // ReasonKlusterletPrepared is the reason of ConditionReadyToApply condition to show the kubeConfig of managed cluster is + // validated from the external-managed-kubeconfig secret in the hosted mode. + ReasonKlusterletPrepared = "KlusterletPrepared" + + // ReasonKlusterletGetDeploymentFailed is the reason of ConditionKlusterletAvailable/ConditionRegistrationDesiredDegraded/ConditionWorkDesiredDegraded + // condition to show it failed to get deployments. + ReasonKlusterletGetDeploymentFailed = "GetDeploymentFailed" + // ReasonKlusterletUnavailablePods is the reason of ConditionKlusterletAvailable/ConditionRegistrationDesiredDegraded/ConditionWorkDesiredDegraded + // condition to show there are unavailable pods. + ReasonKlusterletUnavailablePods = "UnavailablePods" + // ReasonKlusterletDeploymentsFunctional is the reason of ConditionKlusterletAvailable/ConditionRegistrationDesiredDegraded/ConditionWorkDesiredDegraded + // condition to show all deployments are functional. + ReasonKlusterletDeploymentsFunctional = "DeploymentsFunctional" + // ReasonKlusterletNoAvailablePods is the reason of ConditionKlusterletAvailable/ConditionRegistrationDesiredDegraded/ConditionWorkDesiredDegraded + // condition to show there are no available pods. + ReasonKlusterletNoAvailablePods = "NoAvailablePods" + + // ReasonKlusterletAvailable is the reason of ConditionKlusterletAvailable condition to show all deployed resources are available. + ReasonKlusterletAvailable = "KlusterletAvailable" + + // ReasonHubConnectionFunctional is the reason of ConditionHubConnectionDegraded condition to show the spoke cluster can connect to the hub cluster. + ReasonHubConnectionFunctional = "HubConnectionFunctional" + // ReasonHubKubeConfigSecretMissing is the reason of ConditionHubConnectionDegraded condition to show hubKubeConfigSecret is missing. + ReasonHubKubeConfigSecretMissing = "HubKubeConfigSecretMissing" + // ReasonHubKubeConfigMissing is the reason of ConditionHubConnectionDegraded condition to show hubKubeConfig in hubKubeConfigSecret is missing. + ReasonHubKubeConfigMissing = "HubKubeConfigMissing" + // ReasonHubKubeConfigError is the reason of ConditionHubConnectionDegraded condition to show it failed to get the hubKubeConfig. + ReasonHubKubeConfigError = "HubKubeConfigError" + // ReasonClusterNameMissing is the reason of ConditionHubConnectionDegraded condition to show the cluster-name is missing in the hubKubeConfigSecret. + ReasonClusterNameMissing = "ClusterNameMissing" + // ReasonHubKubeConfigUnauthorized is the reason of ConditionHubConnectionDegraded condition to show there is no permission to access the hub using the hubKubeConfigSecret.
+ ReasonHubKubeConfigUnauthorized = "HubKubeConfigUnauthorized" +) diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go index 373b77999..7d4983e2b 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -156,6 +156,7 @@ func (WebhookConfiguration) SwaggerDoc() map[string]string { var map_WorkConfiguration = map[string]string{ "featureGates": "FeatureGates represents the list of feature gates for work If it is set empty, default feature gates will be used. If it is set, featuregate/Foo is an example of one item in FeatureGates:\n 1. If featuregate/Foo does not exist, registration-operator will discard it\n 2. If featuregate/Foo exists and is false by default. It is now possible to set featuregate/Foo=[false|true]\n 3. If featuregate/Foo exists and is true by default. If a cluster-admin upgrading from 1 to 2 wants to continue having featuregate/Foo=false,\n \the can set featuregate/Foo=false before upgrading. Let's say the cluster-admin wants featuregate/Foo=false.", + "workDriver": "WorkDriver represents the type of work driver. Possible values are \"kube\", \"mqtt\", or \"grpc\". If not provided, the default value is \"kube\". If set to non-\"kube\" drivers, the klusterlet needs to use the same driver, and the driver configuration must be provided in a secret named \"work-driver-config\" in the namespace where the cluster manager is running, adhering to the following structure: config.yaml: |\n \n\nFor detailed driver configuration, please refer to the sdk-go documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers", } func (WorkConfiguration) SwaggerDoc() map[string]string {
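
Editor's note: the workDriver plumbing added above is easier to follow with a usage sketch. Everything below is illustrative, not part of the patch: the "open-cluster-management-hub" namespace is an assumption about where the cluster manager runs, and the config.yaml payload is kept as an opaque placeholder because its driver-specific keys are defined by the sdk-go README linked in the field description, not by this change.

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

// clusterManagerWithMqttDriver selects the mqtt work driver instead of the
// defaulted "kube" driver.
func clusterManagerWithMqttDriver() *operatorapiv1.ClusterManager {
	return &operatorapiv1.ClusterManager{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster-manager"},
		Spec: operatorapiv1.ClusterManagerSpec{
			WorkConfiguration: &operatorapiv1.WorkConfiguration{
				WorkDriver: operatorapiv1.WorkDriverTypeMqtt,
			},
		},
	}
}

// workDriverConfigSecret builds the companion secret the CRD description
// requires for non-"kube" drivers. configYAML is the driver-specific
// configuration; its schema is documented in the sdk-go README and is
// intentionally not reproduced here.
func workDriverConfigSecret(configYAML []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "work-driver-config",
			Namespace: "open-cluster-management-hub", // assumed cluster manager namespace
		},
		Data: map[string][]byte{"config.yaml": configYAML},
	}
}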
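
Editor's note: the constant moves in the ssar and status controllers mean downstream code can compare against the exported condition types and reasons instead of re-declaring string literals. A minimal consumer sketch, assuming a Klusterlet fetched elsewhere:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

// reportHubConnection inspects the HubConnectionDegraded condition using the
// exported type and reason constants from the operator API.
func reportHubConnection(klusterlet *operatorapiv1.Klusterlet) {
	cond := meta.FindStatusCondition(klusterlet.Status.Conditions, operatorapiv1.ConditionHubConnectionDegraded)
	switch {
	case cond == nil:
		fmt.Println("hub connection has not been checked yet")
	case cond.Reason == operatorapiv1.ReasonHubConnectionFunctional:
		fmt.Println("hub connection is working:", cond.Message)
	default:
		fmt.Println("hub connection degraded:", cond.Reason, cond.Message)
	}
}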
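
Editor's note: helpers.GetOperatorNamespace() is called from options.go above but defined outside this diff. Judging from the inline logic the patch deletes (the defaultComponentNamespace constant plus the serviceaccount namespace file read), it presumably behaves like the sketch below; treat this as an assumption about the helper, not its actual body.

package helpers

import "os"

// defaultComponentNamespace mirrors the fallback constant removed from options.go.
const defaultComponentNamespace = "open-cluster-management"

// GetOperatorNamespace returns the namespace the operator pod runs in, falling
// back to the default when the serviceaccount file cannot be read (for
// example, when running outside a cluster).
func GetOperatorNamespace() string {
	nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
	if err != nil {
		return defaultComponentNamespace
	}
	return string(nsBytes)
}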