Skip to content

Commit

Permalink
refactor constants
Browse files Browse the repository at this point in the history
Signed-off-by: Zhiwei Yin <zyin@redhat.com>
  • Loading branch information
zhiweiyin318 committed Apr 12, 2024
1 parent 837493e commit 4d333d5
Show file tree
Hide file tree
Showing 36 changed files with 361 additions and 198 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,8 @@ spec:
type: string
type: object
workConfiguration:
default:
workDriver: kube
description: WorkConfiguration contains the configuration of work
properties:
featureGates:
Expand Down Expand Up @@ -356,6 +358,22 @@ spec:
- feature
type: object
type: array
workDriver:
default: kube
description: "WorkDriver represents the type of work driver. Possible
values are \"kube\", \"mqtt\", or \"grpc\". If not provided,
the default value is \"kube\". If set to non-\"kube\" drivers,
the klusterlet needs to use the same driver, and the driver configuration
must be provided in a secret named \"work-driver-config\" in
the namespace where the cluster manager is running, adhering
to the following structure: config.yaml: | <driver-config-in-yaml>
\n For detailed driver configuration, please refer to the sdk-go
documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers"
enum:
- kube
- mqtt
- grpc
type: string
type: object
workImagePullSpec:
default: quay.io/open-cluster-management/work
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,8 @@ spec:
type: string
type: object
workConfiguration:
default:
workDriver: kube
description: WorkConfiguration contains the configuration of work
properties:
featureGates:
Expand Down Expand Up @@ -356,6 +358,22 @@ spec:
- feature
type: object
type: array
workDriver:
default: kube
description: "WorkDriver represents the type of work driver. Possible
values are \"kube\", \"mqtt\", or \"grpc\". If not provided,
the default value is \"kube\". If set to non-\"kube\" drivers,
the klusterlet needs to use the same driver, and the driver configuration
must be provided in a secret named \"work-driver-config\" in
the namespace where the cluster manager is running, adhering
to the following structure: config.yaml: | <driver-config-in-yaml>
\n For detailed driver configuration, please refer to the sdk-go
documentation: https://github.com/open-cluster-management-io/sdk-go/blob/main/pkg/cloudevents/README.md#supported-protocols-and-drivers"
enum:
- kube
- mqtt
- grpc
type: string
type: object
workImagePullSpec:
default: quay.io/open-cluster-management/work
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ require (
k8s.io/kube-aggregator v0.29.3
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
open-cluster-management.io/addon-framework v0.9.1-0.20240327031342-b0ef02a14044
open-cluster-management.io/api v0.13.0
open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c
open-cluster-management.io/sdk-go v0.13.1-0.20240313075541-00a94671ced1
sigs.k8s.io/controller-runtime v0.17.2
sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -425,8 +425,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g
k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
open-cluster-management.io/addon-framework v0.9.1-0.20240327031342-b0ef02a14044 h1:QYOy/OTh/V+udMtPMPo8dA4yEx0ijKIdJ9/GEHFBZ8I=
open-cluster-management.io/addon-framework v0.9.1-0.20240327031342-b0ef02a14044/go.mod h1:nQMHHshMfMNj4qdwg/4oMqRf42FQU6EYy68o2HsLgn4=
open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk=
open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w=
open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c h1:/iUoY6/PqBmcBq3v0+UBFvIcI39k/QPRGqpOv9XtDIc=
open-cluster-management.io/api v0.13.1-0.20240411131856-8f6aa25f111c/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w=
open-cluster-management.io/sdk-go v0.13.1-0.20240313075541-00a94671ced1 h1:s3dJdi1eol+/8ek6JQuaEuoGPkK/wRyM9zowqzKHPDY=
open-cluster-management.io/sdk-go v0.13.1-0.20240313075541-00a94671ced1/go.mod h1:sq+amR9Ls9JzMP5dypvlCx4jIGfDg45gicS67Z/MnlI=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
Expand Down
10 changes: 8 additions & 2 deletions pkg/operator/helpers/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ import (
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"

"open-cluster-management.io/api/feature"
operatorapiv1 "open-cluster-management.io/api/operator/v1"
)

Expand All @@ -49,16 +50,21 @@ const (
FeatureGatesReasonInvalidExisting = "InvalidFeatureGatesExisting"
)

const (
// ImagePullSecret is the image pull secret for operator components, which is synced from the operator ns to hub/spoke/addon ns.
ImagePullSecret = "open-cluster-management-image-pull-credentials"
)

var (
genericScheme = runtime.NewScheme()
genericCodecs = serializer.NewCodecFactory(genericScheme)
genericCodec = genericCodecs.UniversalDeserializer()

DefaultHubRegistrationFeatureGates = []operatorapiv1.FeatureGate{
{Feature: "DefaultClusterSet", Mode: operatorapiv1.FeatureGateModeTypeEnable},
{Feature: string(feature.DefaultClusterSet), Mode: operatorapiv1.FeatureGateModeTypeEnable},
}
DefaultSpokeRegistrationFeatureGates = []operatorapiv1.FeatureGate{
{Feature: "AddonManagement", Mode: operatorapiv1.FeatureGateModeTypeEnable},
{Feature: string(feature.AddonManagement), Mode: operatorapiv1.FeatureGateModeTypeEnable},
}
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,7 @@ import (
)

const (
clusterManagerFinalizer = "operator.open-cluster-management.io/cluster-manager-cleanup"
clusterManagerApplied = "Applied"
clusterManagerProgressing = "Progressing"
clusterManagerFinalizer = "operator.open-cluster-management.io/cluster-manager-cleanup"

defaultWebhookPort = int32(9443)
clusterManagerReSyncTime = 5 * time.Second
Expand Down Expand Up @@ -267,9 +265,9 @@ func (n *clusterManagerController) sync(ctx context.Context, controllerContext f
clusterManager.Status.ObservedGeneration = clusterManager.Generation
if len(errs) == 0 {
meta.SetStatusCondition(&clusterManager.Status.Conditions, metav1.Condition{
Type: clusterManagerApplied,
Type: operatorapiv1.ConditionClusterManagerApplied,
Status: metav1.ConditionTrue,
Reason: "ClusterManagerApplied",
Reason: operatorapiv1.ReasonClusterManagerApplied,
Message: "Components of cluster manager are applied",
})
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,9 @@ func (c *crdReconcile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterM
},
hubCRDResourceFiles...); err != nil {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerApplied,
Type: operatorapiv1.ConditionClusterManagerApplied,
Status: metav1.ConditionFalse,
Reason: "CRDApplyFaild",
Reason: operatorapiv1.ReasonClusterManagerCRDApplyFailed,
Message: fmt.Sprintf("Failed to apply crd: %v", err),
})
return cm, reconcileStop, err
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ func (c *hubReoncile) reconcile(ctx context.Context, cm *operatorapiv1.ClusterMa

if len(appliedErrs) > 0 {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerApplied,
Type: operatorapiv1.ConditionClusterManagerApplied,
Status: metav1.ConditionFalse,
Reason: "HubResourceApplyFailed",
Message: fmt.Sprintf("Failed to apply hub resources: %v", utilerrors.NewAggregate(appliedErrs)),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,9 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus
config.MWReplicaSetEnabled, config.AddOnManagerEnabled)
if err != nil {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerApplied,
Type: operatorapiv1.ConditionClusterManagerApplied,
Status: metav1.ConditionFalse,
Reason: "ServiceAccountSyncFailed",
Reason: operatorapiv1.ReasonServiceAccountSyncFailed,
Message: fmt.Sprintf("Failed to sync service account: %v", err),
})
return cm, reconcileStop, err
Expand Down Expand Up @@ -158,25 +158,25 @@ func (c *runtimeReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus

if len(progressingDeployments) > 0 {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerProgressing,
Type: operatorapiv1.ConditionProgressing,
Status: metav1.ConditionTrue,
Reason: "ClusterManagerDeploymentRolling",
Reason: operatorapiv1.ReasonDeploymentRolling,
Message: fmt.Sprintf("Deployments %s is still rolling", strings.Join(progressingDeployments, ",")),
})
} else {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerProgressing,
Type: operatorapiv1.ConditionProgressing,
Status: metav1.ConditionFalse,
Reason: "ClusterManagerUpToDate",
Reason: operatorapiv1.ReasonUpToDate,
Message: "Components of cluster manager are up to date",
})
}

if len(appliedErrs) > 0 {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerApplied,
Type: operatorapiv1.ConditionClusterManagerApplied,
Status: metav1.ConditionFalse,
Reason: "RuntimeResourceApplyFailed",
Reason: operatorapiv1.ReasonRuntimeResourceApplyFailed,
Message: fmt.Sprintf("Failed to apply runtime resources: %v", utilerrors.NewAggregate(appliedErrs)),
})
return cm, reconcileStop, utilerrors.NewAggregate(appliedErrs)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus
config manifests.HubConfig) (*operatorapiv1.ClusterManager, reconcileState, error) {
var appliedErrs []error

if !meta.IsStatusConditionFalse(cm.Status.Conditions, clusterManagerProgressing) {
if !meta.IsStatusConditionFalse(cm.Status.Conditions, operatorapiv1.ConditionProgressing) {
return cm, reconcileStop, commonhelpers.NewRequeueError("Deployment is not ready", clusterManagerReSyncTime)
}

Expand Down Expand Up @@ -81,9 +81,9 @@ func (c *webhookReconcile) reconcile(ctx context.Context, cm *operatorapiv1.Clus

if len(appliedErrs) > 0 {
meta.SetStatusCondition(&cm.Status.Conditions, metav1.Condition{
Type: clusterManagerApplied,
Type: operatorapiv1.ConditionClusterManagerApplied,
Status: metav1.ConditionFalse,
Reason: "WebhookApplyFailed",
Reason: operatorapiv1.ReasonWebhookApplyFailed,
Message: fmt.Sprintf("Failed to apply webhook resources: %v", utilerrors.NewAggregate(appliedErrs)),
})
return cm, reconcileStop, utilerrors.NewAggregate(appliedErrs)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ import (

operatorinformer "open-cluster-management.io/api/client/operator/informers/externalversions/operator/v1"
operatorlister "open-cluster-management.io/api/client/operator/listers/operator/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"

"open-cluster-management.io/ocm/pkg/common/queue"
"open-cluster-management.io/ocm/pkg/operator/helpers"
"open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller"
)

var (
Expand Down Expand Up @@ -78,7 +78,7 @@ func (c *crdStatusController) sync(ctx context.Context, controllerContext factor
}

// need to wait storage version migrations succeed.
if succeeded := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, migrationcontroller.MigrationSucceeded); !succeeded {
if succeeded := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, operatorapiv1.ConditionMigrationSucceeded); !succeeded {
controllerContext.Queue().AddRateLimited(clusterManagerName)
klog.V(4).Info("Wait storage version migration succeed.")
return nil
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,15 +21,14 @@ import (
operatorapiv1 "open-cluster-management.io/api/operator/v1"

testingcommon "open-cluster-management.io/ocm/pkg/common/testing"
"open-cluster-management.io/ocm/pkg/operator/operators/clustermanager/controllers/migrationcontroller"
)

func TestSync(t *testing.T) {
clusterManager := newClusterManager("testhub")
tc := newTestController(t, clusterManager)

syncContext := testingcommon.NewFakeSyncContext(t, "testhub")
//Do not support migration
// Do not support migration
err := tc.sync(context.Background(), syncContext)
if err != nil {
t.Fatalf("Expected no error when sync, %v", err)
Expand All @@ -38,7 +37,7 @@ func TestSync(t *testing.T) {
// migration succeed
clusterManager.Status.Conditions = []metav1.Condition{
{
Type: migrationcontroller.MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionTrue,
},
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ package migrationcontroller
import (
"context"
"fmt"
"time"

"github.com/openshift/library-go/pkg/assets"
"github.com/openshift/library-go/pkg/controller/factory"
Expand Down Expand Up @@ -53,11 +52,7 @@ var (
)

const (
clusterManagerApplied = "Applied"
MigrationSucceeded = "MigrationSucceeded"

migrationRequestCRDName = "storageversionmigrations.migration.k8s.io"
reSyncTime = time.Second * 5
)

type crdMigrationController struct {
Expand Down Expand Up @@ -134,17 +129,17 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac
if !supported {
newClusterManager := clusterManager.DeepCopy()
meta.SetStatusCondition(&newClusterManager.Status.Conditions, metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionFalse,
Reason: "StorageVersionMigrationFailed",
Reason: operatorapiv1.ReasonStorageVersionMigrationFailed,
Message: "Do not support StorageVersionMigration",
})
_, err = c.patcher.PatchStatus(ctx, newClusterManager, newClusterManager.Status, clusterManager.Status)
return err
}

// do not apply storage version migrations until other resources are applied
if applied := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, clusterManagerApplied); !applied {
if applied := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, operatorapiv1.ConditionClusterManagerApplied); !applied {
controllerContext.Queue().AddRateLimited(clusterManagerName)
return nil
}
Expand All @@ -162,7 +157,7 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac
return
}

//If migration not succeed, wait for all StorageVersionMigrations succeed.
// If the migration has not succeeded yet, wait for all StorageVersionMigrations to succeed.
if migrationCond.Status != metav1.ConditionTrue {
klog.V(4).Infof("Wait all StorageVersionMigrations succeed. migrationCond: %v. error: %v", migrationCond, err)
controllerContext.Queue().AddRateLimited(clusterManagerName)
Expand All @@ -176,9 +171,9 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac
c.recorder.Warningf("StorageVersionMigrationFailed", "Failed to check CRD current storage version. %v", err)

migrationCond = metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionFalse,
Reason: "StorageVersionMigrationFailed",
Reason: operatorapiv1.ReasonStorageVersionMigrationFailed,
Message: fmt.Sprintf("Failed to check CRD current storage version. %v", err),
}
return nil
Expand All @@ -189,9 +184,9 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac
klog.Errorf("Failed to apply StorageVersionMigrations. %v", err)

migrationCond = metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionFalse,
Reason: "StorageVersionMigrationFailed",
Reason: operatorapiv1.ReasonStorageVersionMigrationFailed,
Message: fmt.Sprintf("Failed to create StorageVersionMigrations. %v", err),
}
return err
Expand Down Expand Up @@ -308,9 +303,9 @@ func syncStorageVersionMigrationsCondition(ctx context.Context, toSyncMigrations
migrationStatusCondition := getStorageVersionMigrationStatusCondition(existing)
if migrationStatusCondition == nil {
return metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionFalse,
Reason: "StorageVersionMigrationProcessing",
Reason: operatorapiv1.ReasonStorageVersionMigrationProcessing,
Message: fmt.Sprintf("Wait StorageVersionMigration %v succeed.", existing.Name),
}, nil
}
Expand All @@ -319,24 +314,24 @@ func syncStorageVersionMigrationsCondition(ctx context.Context, toSyncMigrations
continue
case migrationv1alpha1.MigrationFailed:
return metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionFalse,
Reason: fmt.Sprintf("StorageVersionMigration Failed. %v", migrationStatusCondition.Reason),
Message: fmt.Sprintf("Failed to wait StorageVersionMigration %v succeed. %v", existing.Name, migrationStatusCondition.Message),
}, nil
case migrationv1alpha1.MigrationRunning:
return metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionFalse,
Reason: fmt.Sprintf("StorageVersionMigration Running. %v", migrationStatusCondition.Reason),
Message: fmt.Sprintf("Wait StorageVersionMigration %v succeed. %v", existing.Name, migrationStatusCondition.Message),
}, nil
}
}
return metav1.Condition{
Type: MigrationSucceeded,
Type: operatorapiv1.ConditionMigrationSucceeded,
Status: metav1.ConditionTrue,
Reason: "StorageVersionMigrationSucceed",
Reason: operatorapiv1.ReasonStorageVersionMigrationSucceed,
Message: "All StorageVersionMigrations Succeed",
}, nil
}
Expand Down
Loading

0 comments on commit 4d333d5

Please sign in to comment.