From 93a9d194af6d01a84aa213ddeb937d8349ba9d9a Mon Sep 17 00:00:00 2001 From: xuezhaojun Date: Tue, 19 Dec 2023 18:28:59 +0800 Subject: [PATCH] Fix: migration-controller depending on cluster-manager condition. (#328) Signed-off-by: xuezhaojun --- ...r-managedclustersetbindings-migration.yaml | 9 - ...-manager-managedclustersets-migration.yaml | 9 - .../migration_controller.go | 269 ++++++++++------ .../migration_controller_test.go | 301 ++++++++++++------ 4 files changed, 375 insertions(+), 213 deletions(-) delete mode 100644 manifests/cluster-manager/test/cluster-manager-managedclustersetbindings-migration.yaml delete mode 100644 manifests/cluster-manager/test/cluster-manager-managedclustersets-migration.yaml diff --git a/manifests/cluster-manager/test/cluster-manager-managedclustersetbindings-migration.yaml b/manifests/cluster-manager/test/cluster-manager-managedclustersetbindings-migration.yaml deleted file mode 100644 index 1277ea862..000000000 --- a/manifests/cluster-manager/test/cluster-manager-managedclustersetbindings-migration.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: migration.k8s.io/v1alpha1 -kind: StorageVersionMigration -metadata: - name: managedclustersetbindings.cluster.open-cluster-management.io -spec: - resource: - group: cluster.open-cluster-management.io - resource: managedclustersetbindings - version: v1beta1 diff --git a/manifests/cluster-manager/test/cluster-manager-managedclustersets-migration.yaml b/manifests/cluster-manager/test/cluster-manager-managedclustersets-migration.yaml deleted file mode 100644 index 0d955666c..000000000 --- a/manifests/cluster-manager/test/cluster-manager-managedclustersets-migration.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: migration.k8s.io/v1alpha1 -kind: StorageVersionMigration -metadata: - name: managedclustersets.cluster.open-cluster-management.io -spec: - resource: - group: cluster.open-cluster-management.io - resource: managedclustersets - version: v1beta1 diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go index dd46f10ab..589ab4b88 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller.go @@ -3,6 +3,7 @@ package migrationcontroller import ( "context" "fmt" + "time" "github.com/openshift/library-go/pkg/assets" "github.com/openshift/library-go/pkg/controller/factory" @@ -56,6 +57,7 @@ const ( MigrationSucceeded = "MigrationSucceeded" migrationRequestCRDName = "storageversionmigrations.migration.k8s.io" + reSyncTime = time.Second * 5 ) type crdMigrationController struct { @@ -65,6 +67,7 @@ type crdMigrationController struct { clusterManagerLister operatorlister.ClusterManagerLister recorder events.Recorder generateHubClusterClients func(hubConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) + parseMigrations func() ([]*migrationv1alpha1.StorageVersionMigration, error) } // NewCRDMigrationController construct crd migration controller @@ -81,6 +84,7 @@ func NewCRDMigrationController( *operatorapiv1.ClusterManager, operatorapiv1.ClusterManagerSpec, operatorapiv1.ClusterManagerStatus]( clusterManagerClient), clusterManagerLister: clusterManagerInformer.Lister(), + parseMigrations: parseStorageVersionMigrationFiles, recorder: recorder, generateHubClusterClients: 
generateHubClients, } @@ -94,7 +98,12 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac clusterManagerName := controllerContext.QueueKey() klog.V(4).Infof("Reconciling ClusterManager %q", clusterManagerName) - if len(migrationRequestFiles) == 0 { + // if no migration files exist, do nothing and exit the reconcile + migrations, err := c.parseMigrations() + if err != nil { + return err + } + if len(migrations) == 0 { return nil } @@ -117,19 +126,13 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac return err } - // ClusterManager is deleting, we remove its related resources on hub - if !clusterManager.DeletionTimestamp.IsZero() { - return removeStorageVersionMigrations(ctx, migrationClient) - } - - // apply storage version migrations if it is supported + // find whether the storageversionmigration CRD is supported supported, err := supportStorageVersionMigration(ctx, apiExtensionClient) if err != nil { return err } - - newClusterManager := clusterManager.DeepCopy() if !supported { + newClusterManager := clusterManager.DeepCopy() meta.SetStatusCondition(&newClusterManager.Status.Conditions, metav1.Condition{ Type: MigrationSucceeded, Status: metav1.ConditionFalse, @@ -140,37 +143,71 @@ func (c *crdMigrationController) sync(ctx context.Context, controllerContext fac return err } + // if the ClusterManager is deleting, we remove its related resources on hub + if !clusterManager.DeletionTimestamp.IsZero() { + return removeStorageVersionMigrations(ctx, migrations, migrationClient) + } + // do not apply storage version migrations until other resources are applied if applied := meta.IsStatusConditionTrue(clusterManager.Status.Conditions, clusterManagerApplied); !applied { controllerContext.Queue().AddRateLimited(clusterManagerName) return nil } - err = applyStorageVersionMigrations(ctx, migrationClient, c.recorder) + var migrationCond metav1.Condition + // Update the status of the ClusterManager to indicate that the migration is in progress. + defer func() { + newClusterManager := clusterManager.DeepCopy() + meta.SetStatusCondition(&newClusterManager.Status.Conditions, migrationCond) + + _, err = c.patcher.PatchStatus(ctx, newClusterManager, newClusterManager.Status, clusterManager.Status) + if err != nil { + klog.Errorf("Failed to update ClusterManager status. %v", err) + controllerContext.Queue().AddRateLimited(clusterManagerName) + return + } + + //If migration not succeed, wait for all StorageVersionMigrations succeed. + if migrationCond.Status != metav1.ConditionTrue { + klog.V(4).Infof("Wait all StorageVersionMigrations succeed. migrationCond: %v. error: %v", migrationCond, err) + controllerContext.Queue().AddRateLimited(clusterManagerName) + } + }() + + err = checkCRDStorageVersion(ctx, migrations, apiExtensionClient) if err != nil { - klog.Errorf("Failed to apply StorageVersionMigrations. %v", err) - return err + klog.Errorf("Failed to check CRD current storage version. %v", err) + controllerContext.Queue().AddRateLimited(clusterManagerName) + c.recorder.Warningf("StorageVersionMigrationFailed", "Failed to check CRD current storage version. %v", err) + + migrationCond = metav1.Condition{ + Type: MigrationSucceeded, + Status: metav1.ConditionFalse, + Reason: "StorageVersionMigrationFailed", + Message: fmt.Sprintf("Failed to check CRD current storage version. 
%v", err), + } + return nil } - migrationCond, err := syncStorageVersionMigrationsCondition(ctx, migrationClient) + err = createStorageVersionMigrations(ctx, migrations, migrationClient, c.recorder) if err != nil { - klog.Errorf("Failed to sync StorageVersionMigrations condition. %v", err) + klog.Errorf("Failed to apply StorageVersionMigrations. %v", err) + + migrationCond = metav1.Condition{ + Type: MigrationSucceeded, + Status: metav1.ConditionFalse, + Reason: "StorageVersionMigrationFailed", + Message: fmt.Sprintf("Failed to create StorageVersionMigrations. %v", err), + } return err } - meta.SetStatusCondition(&newClusterManager.Status.Conditions, migrationCond) - - _, err = c.patcher.PatchStatus(ctx, newClusterManager, newClusterManager.Status, clusterManager.Status) + migrationCond, err = syncStorageVersionMigrationsCondition(ctx, migrations, migrationClient) if err != nil { + klog.Errorf("Failed to sync StorageVersionMigrations condition. %v", err) return err } - //If migration not succeed, wait for all StorageVersionMigrations succeed. - if migrationCond.Status != metav1.ConditionTrue { - klog.V(4).Infof("Wait all StorageVersionMigrations succeed. migrationCond: %v. error: %v", migrationCond, err) - controllerContext.Queue().AddRateLimited(clusterManagerName) - } - return nil } @@ -188,75 +225,103 @@ func supportStorageVersionMigration(ctx context.Context, apiExtensionClient apie func removeStorageVersionMigrations( ctx context.Context, + toRemoveMigrations []*migrationv1alpha1.StorageVersionMigration, migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter) error { - // Reomve storage version migrations - for _, file := range migrationRequestFiles { - err := removeStorageVersionMigration( - ctx, - migrationClient, - func(name string) ([]byte, error) { - template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) - if err != nil { - return nil, err - } - return assets.MustCreateAssetFromTemplate(name, template, struct{}{}).Data, nil - }, - file, - ) + for _, migration := range toRemoveMigrations { + err := migrationClient.StorageVersionMigrations().Delete(ctx, migration.Name, metav1.DeleteOptions{}) + if errors.IsNotFound(err) { + continue + } if err != nil { - return err + return nil } } return nil } -func applyStorageVersionMigrations(ctx context.Context, - migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter, recorder events.Recorder) error { +// 1.The CRD must exists before the migration CR is created. +// 2.The CRD must have at least 2 version. +// 3.The version set in the migration CR must exist in the CRD. 
+// 4. The current storage version in the CRD must not be the version set in the migration CR; otherwise, during the
+// migration, objects would still be stored in that version. (This requires creating the migration CR with the version
+// you want to migrate from.)
+func checkCRDStorageVersion(ctx context.Context, toCreateMigrations []*migrationv1alpha1.StorageVersionMigration,
+	apiExtensionClient apiextensionsclient.Interface) error {
 	errs := []error{}
-	for _, file := range migrationRequestFiles {
-		required, err := parseStorageVersionMigrationFile(
-			func(name string) ([]byte, error) {
-				template, err := manifests.ClusterManagerManifestFiles.ReadFile(name)
-				if err != nil {
-					return nil, err
-				}
-				return assets.MustCreateAssetFromTemplate(name, template, struct{}{}).Data, nil
-			},
-			file)
+	for _, migration := range toCreateMigrations {
+		// The CRD must exist
+		crd, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx,
+			resourceToCRDName(migration.Spec.Resource.Resource, migration.Spec.Resource.Group), metav1.GetOptions{})
 		if err != nil {
 			errs = append(errs, err)
 			continue
 		}
-		_, _, err = applyStorageVersionMigration(ctx, migrationClient, required, recorder)
+		// The CRD must have at least 2 versions
+		if len(crd.Spec.Versions) < 2 {
+			errs = append(errs, fmt.Errorf("the CRD %v must have at least 2 versions", crd.Name))
+			continue
+		}
+
+		// The version set in the migration CR must exist in the CRD
+		var found bool
+		for _, version := range crd.Spec.Versions {
+			if version.Name == migration.Spec.Resource.Version {
+				found = true
+				break
+			}
+		}
+		if !found {
+			errs = append(errs, fmt.Errorf("the version %v in the migration CR %v does not exist in the CRD %v",
+				migration.Spec.Resource.Version, migration.Name, crd.Name))
+			continue
+		}
+
+		// The current storage version in the CRD must not be the version in the migration CR
+		var storageVersion string
+		for _, version := range crd.Spec.Versions {
+			if version.Name == migration.Spec.Resource.Version && version.Storage {
+				storageVersion = version.Name // the migration version is still the CRD's current storage version
+				break
+			}
+		}
+		if storageVersion == migration.Spec.Resource.Version {
+			errs = append(errs, fmt.Errorf("the current storage version of %v is %v, which is the same as the version in the migration CR %v",
+				resourceToCRDName(migration.Spec.Resource.Resource, migration.Spec.Resource.Group),
+				storageVersion, migration.Name))
+			continue
+		}
+	}
+	return operatorhelpers.NewMultiLineAggregate(errs)
+}
+
+// StorageVersionMigration is a create-only, job-style CR; once it is done, updating the spec won't trigger a new migration.
+// See code details in:
+// https://github.com/kubernetes-sigs/kube-storage-version-migrator/blob/5c8923c5ff96ceb4435f66b986b5aec2dd0cbc22/pkg/controller/kubemigrator.go#L105-L108
+func createStorageVersionMigrations(ctx context.Context,
+	toCreateMigrations []*migrationv1alpha1.StorageVersionMigration,
+	migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter, recorder events.Recorder) error {
+	errs := []error{}
+	for _, migration := range toCreateMigrations {
+		err := createStorageVersionMigration(ctx, migrationClient, migration, recorder)
 		if err != nil {
 			errs = append(errs, err)
 			continue
 		}
 	}
-
 	return operatorhelpers.NewMultiLineAggregate(errs)
 }
 
+func resourceToCRDName(resource, group string) string {
+	return fmt.Sprintf("%s.%s", resource, group)
+}
+
 // syncStorageVersionMigrationsCondition sync the migration condition based on all the StorageVersionMigrations status
 // 1. 
migrationSucceeded is true only when all the StorageVersionMigrations resources succeed. // 2. migrationSucceeded is false when any of the StorageVersionMigrations resources failed or running -func syncStorageVersionMigrationsCondition(ctx context.Context, +func syncStorageVersionMigrationsCondition(ctx context.Context, toSyncMigrations []*migrationv1alpha1.StorageVersionMigration, migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter) (metav1.Condition, error) { - for _, file := range migrationRequestFiles { - required, err := parseStorageVersionMigrationFile( - func(name string) ([]byte, error) { - template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) - if err != nil { - return nil, err - } - return assets.MustCreateAssetFromTemplate(name, template, struct{}{}).Data, nil - }, - file) - if err != nil { - return metav1.Condition{}, err - } - existing, err := migrationClient.StorageVersionMigrations().Get(ctx, required.Name, metav1.GetOptions{}) + for _, migration := range toSyncMigrations { + existing, err := migrationClient.StorageVersionMigrations().Get(ctx, migration.Name, metav1.GetOptions{}) if err != nil { return metav1.Condition{}, err } @@ -296,20 +361,29 @@ func syncStorageVersionMigrationsCondition(ctx context.Context, }, nil } -func removeStorageVersionMigration( - ctx context.Context, - migrationClient migrationv1alpha1client.StorageVersionMigrationsGetter, - manifests resourceapply.AssetFunc, - file string) error { - required, err := parseStorageVersionMigrationFile(manifests, file) - if err != nil { - return err +func parseStorageVersionMigrationFiles() ([]*migrationv1alpha1.StorageVersionMigration, error) { + var errs []error + var migrations []*migrationv1alpha1.StorageVersionMigration + for _, file := range migrationRequestFiles { + migration, err := parseStorageVersionMigrationFile( + func(name string) ([]byte, error) { + template, err := manifests.ClusterManagerManifestFiles.ReadFile(name) + if err != nil { + return nil, err + } + return assets.MustCreateAssetFromTemplate(name, template, struct{}{}).Data, nil + }, + file) + if err != nil { + errs = append(errs, err) + continue + } + migrations = append(migrations, migration) } - err = migrationClient.StorageVersionMigrations().Delete(ctx, required.Name, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil + if len(errs) > 0 { + return nil, operatorhelpers.NewMultiLineAggregate(errs) } - return err + return migrations, nil } func parseStorageVersionMigrationFile( @@ -333,48 +407,41 @@ func parseStorageVersionMigrationFile( return svm, nil } -func applyStorageVersionMigration( +func createStorageVersionMigration( ctx context.Context, client migrationv1alpha1client.StorageVersionMigrationsGetter, - required *migrationv1alpha1.StorageVersionMigration, + migration *migrationv1alpha1.StorageVersionMigration, recorder events.Recorder, -) (*migrationv1alpha1.StorageVersionMigration, bool, error) { - if required == nil { - return nil, false, fmt.Errorf("required StorageVersionMigration is nil") +) error { + if migration == nil { + return fmt.Errorf("required StorageVersionMigration is nil") } - existing, err := client.StorageVersionMigrations().Get(ctx, required.Name, metav1.GetOptions{}) + existing, err := client.StorageVersionMigrations().Get(ctx, migration.Name, metav1.GetOptions{}) if errors.IsNotFound(err) { - actual, err := client.StorageVersionMigrations().Create(context.TODO(), required, metav1.CreateOptions{}) + actual, err := 
client.StorageVersionMigrations().Create(context.TODO(), migration, metav1.CreateOptions{}) if err != nil { - recorder.Warningf("StorageVersionMigrationCreateFailed", "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(required), err) - return actual, true, err + recorder.Warningf("StorageVersionMigrationCreateFailed", "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(migration), err) + return err } recorder.Eventf("StorageVersionMigrationCreated", "Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(actual)) - return actual, true, err + return err } if err != nil { - return nil, false, err + return err } modified := resourcemerge.BoolPtr(false) existingCopy := existing.DeepCopy() - resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) - if !equality.Semantic.DeepEqual(existingCopy.Spec, required.Spec) { + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, migration.ObjectMeta) + if !equality.Semantic.DeepEqual(existingCopy.Spec, migration.Spec) { *modified = true - existing.Spec = required.Spec } if !*modified { - return existing, false, nil + return nil // nothing change in the spec } - actual, err := client.StorageVersionMigrations().Update(ctx, existingCopy, metav1.UpdateOptions{}) - if err != nil { - recorder.Warningf("StorageVersionMigrationUpdateFailed", "Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(existingCopy), err) - return actual, true, err - } - recorder.Eventf("StorageVersionMigrationUpdated", "Updated %s because it changed", resourcehelper.FormatResourceForCLIWithNamespace(actual)) - return actual, true, nil + return fmt.Errorf("StorageVersionMigrationConflict: Trying to set %s with different spec", migration.Name) } func getStorageVersionMigrationStatusCondition(svmcr *migrationv1alpha1.StorageVersionMigration) *migrationv1alpha1.MigrationCondition { diff --git a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go index 3a2f536d5..adc54bdeb 100644 --- a/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go +++ b/pkg/operator/operators/clustermanager/controllers/migrationcontroller/migration_controller_test.go @@ -29,12 +29,40 @@ import ( testingcommon "open-cluster-management.io/ocm/pkg/common/testing" ) -var ( - testMigrationRequestFiles = []string{ - "cluster-manager/test/cluster-manager-managedclustersets-migration.yaml", - "cluster-manager/test/cluster-manager-managedclustersetbindings-migration.yaml", +func newFakeCRD(name string, storageVersion string, versions ...string) runtime.Object { + crd := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, } -) + crd.Spec.Versions = make([]apiextensionsv1.CustomResourceDefinitionVersion, len(versions)) + for i, version := range versions { + storage := false + if version == storageVersion { + storage = true + } + crd.Spec.Versions[i] = apiextensionsv1.CustomResourceDefinitionVersion{ + Name: version, + Storage: storage, + } + } + return crd +} + +func newFakeMigration(name, group, resource, version string) *migrationv1alpha1.StorageVersionMigration { + return &migrationv1alpha1.StorageVersionMigration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: migrationv1alpha1.StorageVersionMigrationSpec{ + Resource: 
migrationv1alpha1.GroupVersionResource{ + Group: group, + Version: version, + Resource: resource, + }, + }, + } +} func TestSupportStorageVersionMigration(t *testing.T) { cases := []struct { @@ -49,7 +77,7 @@ func TestSupportStorageVersionMigration(t *testing.T) { { name: "support", existingObjects: []runtime.Object{ - newCrd(migrationRequestCRDName), + newFakeCRD(migrationRequestCRDName, "v1", "v1"), }, supported: true, }, @@ -69,98 +97,180 @@ func TestSupportStorageVersionMigration(t *testing.T) { } } -func newCrd(name string) runtime.Object { - return &apiextensionsv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, +func TestCheckCRDStorageVersion(t *testing.T) { + cases := []struct { + name string + crds []runtime.Object + toCreateMigrations []*migrationv1alpha1.StorageVersionMigration + expectErr bool + }{ + { + name: "CRDs with 2 versions: v1beta1, v1beta2; the storage version is v1beta2", + crds: []runtime.Object{ + newFakeCRD("foos.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"), + newFakeCRD("bars.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"), + }, + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), + }, + expectErr: false, }, + { + name: "CRDs don't exist", + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), + }, + expectErr: true, + }, + { + name: "CRDs exist, but the storage version is still the previous one", + crds: []runtime.Object{ + newFakeCRD("foos.cluster.open-cluster-management.io", "v1beta1", "v1beta1", "v1beta2"), + newFakeCRD("bars.cluster.open-cluster-management.io", "v1beta1", "v1beta1", "v1beta2"), + }, + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), + }, + expectErr: true, + }, + { + name: "CRDs exist, only have 1 version", + crds: []runtime.Object{ + newFakeCRD("foos.cluster.open-cluster-management.io", "v1beta1", "v1beta1"), + newFakeCRD("bars.cluster.open-cluster-management.io", "v1beta1", "v1beta1"), + }, + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), + }, + expectErr: true, + }, + { + name: "CRDs exist, but the version in the migration CR is not included in the CRD", + crds: []runtime.Object{ + newFakeCRD("foos.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"), + newFakeCRD("bars.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"), + }, + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1alpha1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1alpha1"), + }, + expectErr: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + fakeCRDClient := fakeapiextensions.NewSimpleClientset(c.crds...) 
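+			// Descriptive note (added for clarity): the precondition check should reject a migration
+			// when the target CRD is missing, has fewer than two versions, does not serve the
+			// requested version, or still uses that version as its storage version.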
+ + err := checkCRDStorageVersion(context.TODO(), + c.toCreateMigrations, fakeCRDClient) + if c.expectErr && err != nil { + return + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) } } -func TestApplyStorageVersionMigrations(t *testing.T) { +func TestCreateStorageVersionMigrations(t *testing.T) { cases := []struct { - name string - existingObjects []runtime.Object - validateActions func(t *testing.T, actions []clienttesting.Action) + name string + existingMigrations []runtime.Object + toCreateMigrations []*migrationv1alpha1.StorageVersionMigration + expectErr bool + validateActions func(t *testing.T, actions []clienttesting.Action) }{ { - name: "created", + // No existing migrations been created + // Expect to create migration requests for the example CRD + name: "No existing migrations been created", + existingMigrations: []runtime.Object{}, + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), + }, + expectErr: false, validateActions: func(t *testing.T, actions []clienttesting.Action) { assertActions(t, actions, "get", "create", "get", "create") actual := actions[1].(clienttesting.CreateActionImpl).Object - assertStorageVersionMigration(t, "managedclustersets.cluster.open-cluster-management.io", actual) + assertStorageVersionMigration(t, "foo", actual) actual = actions[3].(clienttesting.CreateActionImpl).Object - assertStorageVersionMigration(t, "managedclustersetbindings.cluster.open-cluster-management.io", actual) + assertStorageVersionMigration(t, "bar", actual) }, }, { - name: "created and updated", - existingObjects: []runtime.Object{ - &migrationv1alpha1.StorageVersionMigration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", - }, - }, - &migrationv1alpha1.StorageVersionMigration{ - ObjectMeta: metav1.ObjectMeta{ - Name: "placementdecisions.cluster.open-cluster-management.io", - }, - }, + // The existing migrations been created with different spec + // Expect to return err + name: "CRDs storage version not update", + existingMigrations: []runtime.Object{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1alpha1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1alpha1"), }, - validateActions: func(t *testing.T, actions []clienttesting.Action) { - assertActions(t, actions, "get", "create", "get", "update") - actual := actions[1].(clienttesting.CreateActionImpl).Object - assertStorageVersionMigration(t, "managedclustersets.cluster.open-cluster-management.io", actual) - actual = actions[3].(clienttesting.UpdateActionImpl).Object - assertStorageVersionMigration(t, "managedclustersetbindings.cluster.open-cluster-management.io", actual) + toCreateMigrations: []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), }, + expectErr: true, }, } - if len(migrationRequestFiles) == 0 { - t.Log("testing with test migrationRequestFiles") - migrationRequestFiles = testMigrationRequestFiles - } - for _, c := range cases { t.Run(c.name, func(t *testing.T) { - fakeMigrationClient := fakemigrationclient.NewSimpleClientset(c.existingObjects...) + fakeMigrationClient := fakemigrationclient.NewSimpleClientset(c.existingMigrations...) 
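+			// Descriptive note (added for clarity): createStorageVersionMigrations only creates missing
+			// CRs; when an existing CR already has a different spec it reports a conflict instead of
+			// updating it, because StorageVersionMigration is create-only.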
- err := applyStorageVersionMigrations(context.TODO(), fakeMigrationClient.MigrationV1alpha1(), eventstesting.NewTestingEventRecorder(t)) + err := createStorageVersionMigrations(context.TODO(), + c.toCreateMigrations, fakeMigrationClient.MigrationV1alpha1(), + eventstesting.NewTestingEventRecorder(t)) + if c.expectErr && err != nil { + return + } if err != nil { t.Fatalf("unexpected error: %v", err) } + c.validateActions(t, fakeMigrationClient.Actions()) }) } } func TestRemoveStorageVersionMigrations(t *testing.T) { - names := []string{ - "managedclustersets.cluster.open-cluster-management.io", - "managedclustersetbindings.cluster.open-cluster-management.io", - "placements.cluster.open-cluster-management.io", - "placementdecisions.cluster.open-cluster-management.io", - } cases := []struct { - name string - existingObjects []runtime.Object - validateActions func(t *testing.T, actions []clienttesting.Action) + name string + existingMigrations []runtime.Object + toRemoveMigrations []*migrationv1alpha1.StorageVersionMigration }{ { name: "not exists", }, { name: "removed", - existingObjects: []runtime.Object{ + existingMigrations: []runtime.Object{ &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", + Name: "foo.cluster.open-cluster-management.io", }, }, &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "placementdecisions.cluster.open-cluster-management.io", + Name: "bar.cluster.open-cluster-management.io", + }, + }, + }, + toRemoveMigrations: []*migrationv1alpha1.StorageVersionMigration{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "foo.cluster.open-cluster-management.io", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bar.cluster.open-cluster-management.io", }, }, }, @@ -169,14 +279,14 @@ func TestRemoveStorageVersionMigrations(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - fakeMigrationClient := fakemigrationclient.NewSimpleClientset(c.existingObjects...) - err := removeStorageVersionMigrations(context.TODO(), fakeMigrationClient.MigrationV1alpha1()) + fakeMigrationClient := fakemigrationclient.NewSimpleClientset(c.existingMigrations...) 
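+			// Descriptive note (added for clarity): removal tolerates NotFound, so migrations that were
+			// never created are skipped without an error.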
+ err := removeStorageVersionMigrations(context.TODO(), nil, fakeMigrationClient.MigrationV1alpha1()) if err != nil { t.Fatalf("unexpected error: %v", err) } - for _, name := range names { - _, err := fakeMigrationClient.MigrationV1alpha1().StorageVersionMigrations().Get(context.TODO(), name, metav1.GetOptions{}) + for _, m := range c.toRemoveMigrations { + _, err := fakeMigrationClient.MigrationV1alpha1().StorageVersionMigrations().Get(context.TODO(), m.Name, metav1.GetOptions{}) if errors.IsNotFound(err) { continue } @@ -210,25 +320,29 @@ func assertStorageVersionMigration(t *testing.T, name string, object runtime.Obj } } -func Test_syncStorageVersionMigrationsCondition(t *testing.T) { +func TestSyncStorageVersionMigrationsCondition(t *testing.T) { + toSyncMigrations := []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foos.cluster.open-cluster-management.io", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bars.cluster.open-cluster-management.io", "cluster.open-cluster-management.io", "bars", "v1beta1"), + } - tests := []struct { - name string - existingObjects []runtime.Object - want metav1.Condition - wantErr bool + cases := []struct { + name string + existingMigrations []runtime.Object + want metav1.Condition + wantErr bool }{ { name: "empty condition", - existingObjects: []runtime.Object{ + existingMigrations: []runtime.Object{ &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", + Name: "foos.cluster.open-cluster-management.io", }, }, &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersets.cluster.open-cluster-management.io", + Name: "bars.cluster.open-cluster-management.io", }, }, }, @@ -240,10 +354,10 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, { name: "all migration running condition", - existingObjects: []runtime.Object{ + existingMigrations: []runtime.Object{ &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", + Name: "foos.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -256,7 +370,7 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersets.cluster.open-cluster-management.io", + Name: "bars.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -276,10 +390,10 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, { name: "one migration running, one succeed", - existingObjects: []runtime.Object{ + existingMigrations: []runtime.Object{ &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", + Name: "foos.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -292,7 +406,7 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersets.cluster.open-cluster-management.io", + Name: "bars.cluster.open-cluster-management.io", }, Status: 
migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -312,10 +426,10 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, { name: "one migration failed, one succeed", - existingObjects: []runtime.Object{ + existingMigrations: []runtime.Object{ &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", + Name: "foos.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -328,7 +442,7 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersets.cluster.open-cluster-management.io", + Name: "bars.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -348,10 +462,10 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, { name: "all migration succeed", - existingObjects: []runtime.Object{ + existingMigrations: []runtime.Object{ &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersetbindings.cluster.open-cluster-management.io", + Name: "foos.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -364,7 +478,7 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ - Name: "managedclustersets.cluster.open-cluster-management.io", + Name: "bars.cluster.open-cluster-management.io", }, Status: migrationv1alpha1.StorageVersionMigrationStatus{ Conditions: []migrationv1alpha1.MigrationCondition{ @@ -384,16 +498,11 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { }, } - if len(migrationRequestFiles) == 0 { - t.Log("testing with test migrationRequestFiles") - migrationRequestFiles = testMigrationRequestFiles - } - - for _, tt := range tests { + for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - fakeMigrationClient := fakemigrationclient.NewSimpleClientset(tt.existingObjects...) + fakeMigrationClient := fakemigrationclient.NewSimpleClientset(tt.existingMigrations...) 
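+			// Descriptive note (added for clarity): the aggregated condition is True only when every
+			// StorageVersionMigration has succeeded; a failed or still-running migration keeps it False.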
- got, err := syncStorageVersionMigrationsCondition(context.Background(), fakeMigrationClient.MigrationV1alpha1()) + got, err := syncStorageVersionMigrationsCondition(context.Background(), toSyncMigrations, fakeMigrationClient.MigrationV1alpha1()) if (err != nil) != tt.wantErr { t.Errorf("syncStorageVersionMigrationsCondition() error = %v, wantErr %v", err, tt.wantErr) return @@ -406,11 +515,6 @@ func Test_syncStorageVersionMigrationsCondition(t *testing.T) { } func TestSync(t *testing.T) { - if len(migrationRequestFiles) == 0 { - t.Log("testing with test migrationRequestFiles") - migrationRequestFiles = testMigrationRequestFiles - } - clusterManager := newClusterManager("testhub") tc, client := newTestController(t, clusterManager) @@ -436,8 +540,11 @@ func TestSync(t *testing.T) { Status: metav1.ConditionTrue, }, } - migrateCrd := newCrd(migrationRequestCRDName) - tc, client = newTestController(t, clusterManager, migrateCrd) + + tc, client = newTestController(t, clusterManager, + newFakeCRD(migrationRequestCRDName, "v1", "v1"), + newFakeCRD("foos.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2"), + newFakeCRD("bars.cluster.open-cluster-management.io", "v1beta2", "v1beta1", "v1beta2")) err = tc.sync(context.Background(), syncContext) if err != nil { t.Fatalf("Expected no error when sync, %v", err) @@ -471,6 +578,12 @@ func newTestController( hubKubeConfig *rest.Config) (apiextensionsclient.Interface, migrationv1alpha1client.StorageVersionMigrationsGetter, error) { return fakeAPIExtensionClient, fakeMigrationClient.MigrationV1alpha1(), nil } + crdMigrationController.parseMigrations = func() ([]*migrationv1alpha1.StorageVersionMigration, error) { + return []*migrationv1alpha1.StorageVersionMigration{ + newFakeMigration("foo", "cluster.open-cluster-management.io", "foos", "v1beta1"), + newFakeMigration("bar", "cluster.open-cluster-management.io", "bars", "v1beta1"), + }, nil + } store := operatorInformers.Operator().V1().ClusterManagers().Informer().GetStore() if err := store.Add(clustermanager); err != nil { t.Fatal(err)