From cbcc50cce94ab2a467961a866c21d465d8cbac64 Mon Sep 17 00:00:00 2001
From: Evan Cordell
Date: Wed, 4 Sep 2019 13:15:19 -0400
Subject: [PATCH] feat(olm): don't calculate a patch when updating via a replacement chain

---
 pkg/api/wrappers/deployment_install_client.go |  14 +-
 pkg/controller/operators/olm/operator_test.go | 415 +++++++++---------
 test/e2e/installplan_e2e_test.go              | 202 +++++++++
 3 files changed, 422 insertions(+), 209 deletions(-)

diff --git a/pkg/api/wrappers/deployment_install_client.go b/pkg/api/wrappers/deployment_install_client.go
index 5ddb69e414..c05b48bd3c 100644
--- a/pkg/api/wrappers/deployment_install_client.go
+++ b/pkg/api/wrappers/deployment_install_client.go
@@ -97,8 +97,18 @@ func (c *InstallStrategyDeploymentClientForNamespace) DeleteDeployment(name stri
 }
 
 func (c *InstallStrategyDeploymentClientForNamespace) CreateOrUpdateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
-	d, _, err := c.opClient.CreateOrRollingUpdateDeployment(deployment)
-	return d, err
+	_, err := c.opClient.GetDeployment(deployment.Namespace, deployment.Name)
+	if err != nil {
+		if !apierrors.IsNotFound(err) {
+			return nil, err
+		}
+		created, err := c.CreateDeployment(deployment)
+		if err != nil {
+			return nil, err
+		}
+		return created, err
+	}
+	return c.opClient.KubernetesInterface().AppsV1().Deployments(deployment.GetNamespace()).Update(deployment)
 }
 
 func (c *InstallStrategyDeploymentClientForNamespace) GetServiceAccountByName(serviceAccountName string) (*corev1.ServiceAccount, error) {
diff --git a/pkg/controller/operators/olm/operator_test.go b/pkg/controller/operators/olm/operator_test.go
index e10a5d56fe..315a40c62c 100644
--- a/pkg/controller/operators/olm/operator_test.go
+++ b/pkg/controller/operators/olm/operator_test.go
@@ -25,7 +25,6 @@ import (
 	rbacv1 "k8s.io/api/rbac/v1"
 	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
-	"k8s.io/apimachinery/pkg/api/equality"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -44,6 +43,8 @@ import (
 	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
 	apiregistrationfake "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
 
+	configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
+
 	v1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1"
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
@@ -61,7 +62,6 @@ import (
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/scoped"
 	"github.com/operator-framework/operator-registry/pkg/registry"
-	configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
 )
 
 type TestStrategy struct{}
@@ -359,7 +359,6 @@ func deployment(deploymentName, namespace, serviceAccountName string, templateAn
 		},
 		Status: appsv1.DeploymentStatus{
 			Replicas:          singleInstance,
-			ReadyReplicas:     singleInstance,
 			AvailableReplicas: singleInstance,
 			UpdatedReplicas:   singleInstance,
 		},
@@ -3037,6 +3036,8 @@ func TestTransitionCSV(t *testing.T) {
 	}
 }
 
+// TestUpdates verifies that a set of expected phase transitions occur when multiple CSVs are present
+// and that they do not depend on sync order or event order
 func TestUpdates(t *testing.T) {
 	// A - replacedby -> B - replacedby -> C
 	namespace := "ns"
@@ -3073,14 +3074,13 @@ func TestUpdates(t *testing.T) {
 	}
 
 	deleted := v1alpha1.ClusterServiceVersionPhase("deleted")
-	noPrevious := v1alpha1.ClusterServiceVersionPhase("NoPrevious")
-
+	deploymentName := "csv1-dep1"
 	crd := crd("c1", "v1", "g1")
 	a := csv("csvA",
 		namespace,
 		"0.0.0",
 		"",
-		installStrategy("csv1-dep1", nil, nil),
+		installStrategy(deploymentName, nil, nil),
 		[]*v1beta1.CustomResourceDefinition{crd},
 		[]*v1beta1.CustomResourceDefinition{},
 		v1alpha1.CSVPhaseNone)
 	b := csv("csvB",
@@ -3088,7 +3088,7 @@
 		namespace,
 		"0.0.0",
 		"csvA",
-		installStrategy("csv1-dep1", nil, nil),
+		installStrategy(deploymentName, nil, nil),
 		[]*v1beta1.CustomResourceDefinition{crd},
 		[]*v1beta1.CustomResourceDefinition{},
 		v1alpha1.CSVPhaseNone)
 	c := csv("csvC",
@@ -3096,175 +3096,140 @@
 		namespace,
 		"0.0.0",
 		"csvB",
-		installStrategy("csv1-dep1", nil, nil),
+		installStrategy(deploymentName, nil, nil),
 		[]*v1beta1.CustomResourceDefinition{crd},
 		[]*v1beta1.CustomResourceDefinition{},
 		v1alpha1.CSVPhaseNone)
 
-	type csvPhases map[string][]v1alpha1.ClusterServiceVersionPhase
+	simulateSuccessfulRollout := func(csv *v1alpha1.ClusterServiceVersion, client operatorclient.ClientInterface) {
+		// get the deployment, which should exist
+		dep, err := client.GetDeployment(namespace, deploymentName)
+		require.NoError(t, err)
+
+		// force it healthy
+		dep.Status.Replicas = 1
+		dep.Status.UpdatedReplicas = 1
+		dep.Status.AvailableReplicas = 1
+		_, err = client.KubernetesInterface().AppsV1().Deployments(namespace).UpdateStatus(dep)
+		require.NoError(t, err)
+	}
+
+	// when csv A is in phase X, expect B and C to be in state Y
+	type csvPhaseKey struct {
+		name  string
+		phase v1alpha1.ClusterServiceVersionPhase
+	}
+	type expectation struct {
+		whenIn   csvPhaseKey
+		shouldBe map[string]v1alpha1.ClusterServiceVersionPhase
+	}
+	// for a given CSV and phase, set the expected phases of the other CSVs
+	expected := []expectation{
+		{
+			whenIn: csvPhaseKey{name: a.GetName(), phase: v1alpha1.CSVPhaseNone},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				b.GetName(): v1alpha1.CSVPhaseNone,
+				c.GetName(): v1alpha1.CSVPhaseNone,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: a.GetName(), phase: v1alpha1.CSVPhasePending},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				b.GetName(): v1alpha1.CSVPhasePending,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: a.GetName(), phase: v1alpha1.CSVPhaseInstallReady},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				b.GetName(): v1alpha1.CSVPhasePending,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: a.GetName(), phase: v1alpha1.CSVPhaseInstalling},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				b.GetName(): v1alpha1.CSVPhasePending,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: a.GetName(), phase: v1alpha1.CSVPhaseSucceeded},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				b.GetName(): v1alpha1.CSVPhasePending,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: b.GetName(), phase: v1alpha1.CSVPhaseInstallReady},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				a.GetName(): v1alpha1.CSVPhaseReplacing,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: b.GetName(), phase: v1alpha1.CSVPhaseInstalling},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				a.GetName(): v1alpha1.CSVPhaseReplacing,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: b.GetName(), phase: v1alpha1.CSVPhaseSucceeded},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				a.GetName(): v1alpha1.CSVPhaseDeleting,
+				c.GetName(): v1alpha1.CSVPhasePending,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: c.GetName(), phase: v1alpha1.CSVPhaseInstallReady},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				a.GetName(): deleted,
+				b.GetName(): v1alpha1.CSVPhaseReplacing,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: c.GetName(), phase: v1alpha1.CSVPhaseInstalling},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				a.GetName(): deleted,
+				b.GetName(): v1alpha1.CSVPhaseReplacing,
+			},
+		},
+		{
+			whenIn: csvPhaseKey{name: c.GetName(), phase: v1alpha1.CSVPhaseSucceeded},
+			shouldBe: map[string]v1alpha1.ClusterServiceVersionPhase{
+				a.GetName(): deleted,
+				b.GetName(): deleted,
+			},
+		},
+	}
 	tests := []struct {
-		name     string
-		in       []*v1alpha1.ClusterServiceVersion
-		expected map[string][]v1alpha1.ClusterServiceVersionPhase
+		name string
+		in   []*v1alpha1.ClusterServiceVersion
 	}{
 		{
 			name: "abc",
 			in:   []*v1alpha1.ClusterServiceVersion{a, b, c},
-			expected: csvPhases{
-				"csvA": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseDeleting,
-					deleted,
-					deleted,
-					deleted,
-				},
-				"csvB": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseDeleting,
-					deleted,
-				},
-				"csvC": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseSucceeded,
-				},
-			},
+		},
+		{
+			name: "acb",
+			in:   []*v1alpha1.ClusterServiceVersion{a, c, b},
 		},
 		{
 			name: "bac",
 			in:   []*v1alpha1.ClusterServiceVersion{b, a, c},
-			expected: csvPhases{
-				"csvB": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseDeleting,
-					deleted,
-				},
-				"csvA": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseDeleting,
-					deleted,
-					deleted,
-					deleted,
-					deleted,
-				},
-				"csvC": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseSucceeded,
-				},
-			},
+		},
+		{
+			name: "bca",
+			in:   []*v1alpha1.ClusterServiceVersion{b, c, a},
 		},
 		{
 			name: "cba",
+			in:   []*v1alpha1.ClusterServiceVersion{c, b, a},
+		},
+		{
+			name: "cab",
 			in:   []*v1alpha1.ClusterServiceVersion{c, a, b},
-			expected: csvPhases{
-				"csvC": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseSucceeded,
-				},
-				"csvB": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseDeleting,
-					deleted,
-					deleted,
-				},
-				"csvA": {
-					v1alpha1.CSVPhaseNone,
-					v1alpha1.CSVPhasePending,
-					v1alpha1.CSVPhaseInstallReady,
-					v1alpha1.CSVPhaseInstalling,
-					v1alpha1.CSVPhaseSucceeded,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseReplacing,
-					v1alpha1.CSVPhaseDeleting,
-					deleted,
-					deleted,
-					deleted,
-					deleted,
-				},
-			},
 		},
 	}
 	for _, tt := range tests {
@@ -3283,66 +3248,87 @@ func TestUpdates(t *testing.T) {
 			)
 			require.NoError(t, err)
 
-			// Create input CSV set
-			for _, csv := range tt.in {
-				_, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Create(csv)
-				require.NoError(t, err)
+			// helper to get the latest view of a set of CSVs from the set - we only expect no errors if not deleted
+			fetchLatestCSVs := func(csvsToSync map[string]*v1alpha1.ClusterServiceVersion, deleted map[string]struct{}) (out map[string]*v1alpha1.ClusterServiceVersion) {
+				out = map[string]*v1alpha1.ClusterServiceVersion{}
+				for name := range csvsToSync {
+					fetched, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(name, metav1.GetOptions{})
+					if _, ok := deleted[name]; !ok {
+						require.NoError(t, err)
+						out[name] = fetched
+					}
+				}
+				return out
 			}
 
-			for i := range tt.expected["csvA"] {
-				// sync all csvs once
-				for _, csv := range tt.in {
-					name := csv.GetName()
-					expectedCurrent := tt.expected[name][i]
-					var expectedPrevious v1alpha1.ClusterServiceVersionPhase
-					if i > 0 {
-						expectedPrevious = tt.expected[name][i-1]
-					} else {
-						expectedPrevious = noPrevious
+			// helper to sync a set of csvs, in order, and return the latest view from the cluster
+			syncCSVs := func(csvsToSync map[string]*v1alpha1.ClusterServiceVersion, deleted map[string]struct{}) (out map[string]*v1alpha1.ClusterServiceVersion) {
+				for name, csv := range csvsToSync {
+					_ = op.syncClusterServiceVersion(csv)
+					if _, ok := deleted[name]; !ok {
+						require.NoError(t, err)
 					}
+				}
+				return fetchLatestCSVs(csvsToSync, deleted)
+			}
 
-					if expectedPrevious == deleted {
-						// don't sync previously deleted csvs
+			// helper, given a set of expectations, pull out which entries we expect to have been deleted from the cluster
+			deletedCSVs := func(shouldBe map[string]v1alpha1.ClusterServiceVersionPhase) map[string]struct{} {
+				out := map[string]struct{}{}
+				for name, phase := range shouldBe {
+					if phase != deleted {
 						continue
 					}
+					out[name] = struct{}{}
+				}
+				return out
+			}
 
-			// Get the CSV from the cluster
-			fetched, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(name, metav1.GetOptions{})
-			require.NoError(t, err)
+			// Create input CSV set
+			csvsToSync := map[string]*v1alpha1.ClusterServiceVersion{}
+			for _, csv := range tt.in {
+				_, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Create(csv)
+				require.NoError(t, err)
+				csvsToSync[csv.GetName()] = csv
+			}
+
+			for _, e := range expected {
+				// get the latest view from the cluster
+				csvsToSync = fetchLatestCSVs(csvsToSync, deletedCSVs(e.shouldBe))
 
-				// Sync the CSV once
-				_ = op.syncClusterServiceVersion(fetched)
+				// sync the current csv until it's reached the expected status
+				current := csvsToSync[e.whenIn.name]
 
-				// If the csv was deleted by the sync, we don't bother waiting for listers to sync
-				if expectedCurrent == deleted {
+				if current.Status.Phase == v1alpha1.CSVPhaseInstalling {
+					simulateSuccessfulRollout(current, op.opClient)
+				}
+				for current.Status.Phase != e.whenIn.phase {
+					fmt.Printf("waiting for (when) %s to be %s\n", e.whenIn.name, e.whenIn.phase)
+					csvsToSync = syncCSVs(csvsToSync, deletedCSVs(e.shouldBe))
+					current = csvsToSync[e.whenIn.name]
+				}
+
+				// sync the other csvs until they're in the expected status
+				for name, phase := range e.shouldBe {
+					if phase == deleted {
+						// todo verify deleted
 						continue
 					}
-
-				// If we expect a change, wait for listers to sync the change so that the next sync reflects the changes
-				if expectedCurrent != expectedPrevious {
-					err = wait.PollImmediate(1*time.Millisecond, 10*time.Second, func() (bool, error) {
-						updated, err := op.lister.OperatorsV1alpha1().ClusterServiceVersionLister().ClusterServiceVersions(namespace).Get(csv.GetName())
-						if k8serrors.IsNotFound(err) {
-							return false, nil
-						}
-						return !equality.Semantic.DeepEqual(updated, fetched), err
-					})
+					other := csvsToSync[name]
+					for other.Status.Phase != phase {
+						fmt.Printf("waiting for %s to be %s\n", name, phase)
+						_ = op.syncClusterServiceVersion(other)
+						other, err = op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(name, metav1.GetOptions{})
 						require.NoError(t, err)
 					}
+					csvsToSync[name] = other
 				}
 
-			// check that each csv is in the expected phase
-			for _, csv := range tt.in {
-				expectedPhase := tt.expected[csv.GetName()][i]
-				if expectedPhase != deleted {
-					fetched, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(csv.GetName(), metav1.GetOptions{})
-					require.NoError(t, err)
-					t.Logf("%s - %v", csv.GetName(), fetched.Status)
-					require.Equal(t, string(expectedPhase), string(fetched.Status.Phase), "incorrect phase for %s at index %d", csv.GetName(), i)
-				} else {
-					_, err := op.client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Get(csv.GetName(), metav1.GetOptions{})
-					require.Error(t, err)
+				for name, phase := range e.shouldBe {
+					if phase == deleted {
+						continue
 					}
+					require.Equal(t, phase, csvsToSync[name].Status.Phase)
 				}
 			}
 		})
@@ -3371,13 +3357,13 @@ func TestSyncOperatorGroups(t *testing.T) {
 			},
 		},
 	}
-
+	deploymentName := "csv1-dep1"
 	crd := crd("c1", "v1", "fake.api.group")
 	operatorCSV := csvWithLabels(csv("csv1",
 		operatorNamespace,
 		"0.0.0",
 		"",
-		installStrategy("csv1-dep1", permissions, nil),
+		installStrategy(deploymentName, permissions, nil),
 		[]*v1beta1.CustomResourceDefinition{crd},
 		[]*v1beta1.CustomResourceDefinition{},
 		v1alpha1.CSVPhaseNone,
@@ -3481,7 +3467,7 @@ func TestSyncOperatorGroups(t *testing.T) {
 	ownerutil.AddNonBlockingOwner(serviceAccount, operatorCSV)
 
-	ownedDeployment := deployment("csv1-dep1", operatorNamespace, serviceAccount.GetName(), nil)
+	ownedDeployment := deployment(deploymentName, operatorNamespace, serviceAccount.GetName(), nil)
 	ownerutil.AddNonBlockingOwner(ownedDeployment, operatorCSV)
 
 	annotatedDeployment := ownedDeployment.DeepCopy()
@@ -4241,6 +4227,19 @@ func TestSyncOperatorGroups(t *testing.T) {
 			)
 			require.NoError(t, err)
 
+			simulateSuccessfulRollout := func(csv *v1alpha1.ClusterServiceVersion, client operatorclient.ClientInterface) {
+				// get the deployment, which should exist
+				dep, err := client.GetDeployment(tt.initial.operatorGroup.GetNamespace(), deploymentName)
+				require.NoError(t, err)
+
+				// force it healthy
+				dep.Status.Replicas = 1
+				dep.Status.UpdatedReplicas = 1
+				dep.Status.AvailableReplicas = 1
+				_, err = client.KubernetesInterface().AppsV1().Deployments(tt.initial.operatorGroup.GetNamespace()).UpdateStatus(dep)
+				require.NoError(t, err)
+			}
+
 			err = op.syncOperatorGroups(tt.initial.operatorGroup)
 			require.NoError(t, err)
 
@@ -4270,7 +4269,9 @@ func TestSyncOperatorGroups(t *testing.T) {
 			require.NoError(t, err)
 
 			for i, obj := range opGroupCSVs.Items {
-
+				if obj.Status.Phase == v1alpha1.CSVPhaseInstalling {
+					simulateSuccessfulRollout(&obj, op.opClient)
+				}
 				err = op.syncClusterServiceVersion(&obj)
 				require.NoError(t, err, "%#v", obj)
diff --git a/test/e2e/installplan_e2e_test.go b/test/e2e/installplan_e2e_test.go
index 0954abdf60..970d4ec4d6 100644
--- a/test/e2e/installplan_e2e_test.go
+++ b/test/e2e/installplan_e2e_test.go
@@ -1660,6 +1660,208 @@ func TestUpdateCatalogForSubscription(t *testing.T) {
 	})
 
+	t.Run("StopOnCSVModifications", func(t *testing.T) {
+		defer cleaner.NotifyTestComplete(t, true)
+
+		c := newKubeClient(t)
+		crc := newCRClient(t)
+		defer func() {
+			require.NoError(t, crc.OperatorsV1alpha1().Subscriptions(testNamespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{}))
+		}()
+
+		// Build initial catalog
+		mainPackageName := genName("nginx-amplify-")
+		mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
+		stableChannel := "stable"
+		crdPlural := genName("ins-amplify-")
+		crdName := crdPlural + ".cluster.com"
+		mainCRD := apiextensions.CustomResourceDefinition{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crdName,
+			},
+			Spec: apiextensions.CustomResourceDefinitionSpec{
+				Group: "cluster.com",
+				Versions: []apiextensions.CustomResourceDefinitionVersion{
+					{
+						Name:    "v1alpha1",
+						Served:  true,
+						Storage: true,
+					},
+				},
+				Names: apiextensions.CustomResourceDefinitionNames{
+					Plural:   crdPlural,
+					Singular: crdPlural,
+					Kind:     crdPlural,
+					ListKind: "list" + crdPlural,
+				},
+				Scope: "Namespaced",
+			},
+		}
+
+		// Generate permissions
+		serviceAccountName := genName("nginx-sa")
+		permissions := []install.StrategyDeploymentPermissions{
+			{
+				ServiceAccountName: serviceAccountName,
+				Rules: []rbacv1.PolicyRule{
+					{
+						Verbs:     []string{rbac.VerbAll},
+						APIGroups: []string{"cluster.com"},
+						Resources: []string{crdPlural},
+					},
+				},
+			},
+		}
+		// Generate cluster permissions
+		clusterPermissions := []install.StrategyDeploymentPermissions{
+			{
+				ServiceAccountName: serviceAccountName,
+				Rules: []rbacv1.PolicyRule{
+					{
+						Verbs:     []string{rbac.VerbAll},
+						APIGroups: []string{"cluster.com"},
+						Resources: []string{crdPlural},
+					},
+				},
+			},
+		}
+
+		// Create the catalog sources
+		deploymentName := genName("dep-")
+		mainNamedStrategy := newNginxInstallStrategy(deploymentName, permissions, clusterPermissions)
+		mainCSV := newCSV(mainPackageStable, testNamespace, "", semver.MustParse("0.1.0"), nil, nil, mainNamedStrategy)
+		mainCatalogName := genName("mock-ocs-stomper-")
+		mainManifests := []registry.PackageManifest{
+			{
+				PackageName: mainPackageName,
+				Channels: []registry.PackageChannel{
+					{Name: stableChannel, CurrentCSVName: mainCSV.GetName()},
+				},
+				DefaultChannelName: stableChannel,
+			},
+		}
+		_, cleanupMainCatalogSource := createInternalCatalogSource(t, c, crc, mainCatalogName, testNamespace, mainManifests, []apiextensions.CustomResourceDefinition{mainCRD}, []v1alpha1.ClusterServiceVersion{mainCSV})
+		defer cleanupMainCatalogSource()
+		// Attempt to get the catalog source before creating install plan
+		_, err := fetchCatalogSource(t, crc, mainCatalogName, testNamespace, catalogSourceRegistryPodSynced)
+		require.NoError(t, err)
+
+		subscriptionName := genName("sub-nginx-stompy-")
+		subscriptionCleanup := createSubscriptionForCatalog(t, crc, testNamespace, subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", v1alpha1.ApprovalAutomatic)
+		defer subscriptionCleanup()
+
+		subscription, err := fetchSubscription(t, crc, testNamespace, subscriptionName, subscriptionHasInstallPlanChecker)
+		require.NoError(t, err)
+		require.NotNil(t, subscription)
+		require.NotNil(t, subscription.Status.InstallPlanRef)
+		require.Equal(t, mainCSV.GetName(), subscription.Status.CurrentCSV)
+
+		installPlanName := subscription.Status.InstallPlanRef.Name
+
+		// Wait for InstallPlan to be status: Complete before checking resource presence
+		fetchedInstallPlan, err := fetchInstallPlan(t, crc, installPlanName, buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
+		require.NoError(t, err)
+
+		require.Equal(t, v1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)
+
+		// Verify CSV is created
+		csv, err := awaitCSV(t, crc, testNamespace, mainCSV.GetName(), csvSucceededChecker)
+		require.NoError(t, err)
+
+		modifiedEnv := []corev1.EnvVar{{Name: "EXAMPLE", Value: "value"}}
+		modifiedDetails := install.StrategyDetailsDeployment{
+			DeploymentSpecs: []install.StrategyDeploymentSpec{
+				{
+					Name: deploymentName,
+					Spec: appsv1.DeploymentSpec{
+						Selector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"app": "nginx"},
+						},
+						Replicas: &singleInstance,
+						Template: corev1.PodTemplateSpec{
+							ObjectMeta: metav1.ObjectMeta{
+								Labels: map[string]string{"app": "nginx"},
+							},
+							Spec: corev1.PodSpec{Containers: []corev1.Container{
+								{
+									Name:            genName("nginx"),
+									Image:           *dummyImage,
+									Ports:           []corev1.ContainerPort{{ContainerPort: 80}},
+									ImagePullPolicy: corev1.PullIfNotPresent,
+									Env:             modifiedEnv,
+								},
+							}},
+						},
+					},
+				},
+			},
+			Permissions:        permissions,
+			ClusterPermissions: clusterPermissions,
+		}
+		detailsRaw, _ := json.Marshal(modifiedDetails)
+		csv.Spec.InstallStrategy = v1alpha1.NamedInstallStrategy{
+			StrategyName:    install.InstallStrategyNameDeployment,
+			StrategySpecRaw: detailsRaw,
+		}
+		_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(testNamespace).Update(csv)
+		require.NoError(t, err)
+
+		// Wait for csv to update
+		_, err = awaitCSV(t, crc, testNamespace, csv.GetName(), csvSucceededChecker)
+		require.NoError(t, err)
+
+		// Should have the updated env var
+		err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
+			dep, err := c.GetDeployment(testNamespace, deploymentName)
+			if err != nil {
+				return false, nil
+			}
+			if len(dep.Spec.Template.Spec.Containers[0].Env) == 0 {
+				return false, nil
+			}
+			return modifiedEnv[0] == dep.Spec.Template.Spec.Containers[0].Env[0], nil
+		})
+		require.NoError(t, err)
+
+		// Create the catalog sources
+		// Updated csv has the same deployment strategy as main
+		updatedCSV := newCSV(mainPackageStable+"-next", testNamespace, mainCSV.GetName(), semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, mainNamedStrategy)
+		updatedManifests := []registry.PackageManifest{
+			{
+				PackageName: mainPackageName,
+				Channels: []registry.PackageChannel{
+					{Name: stableChannel, CurrentCSVName: updatedCSV.GetName()},
+				},
+				DefaultChannelName: stableChannel,
+			},
+		}
+		// Update catalog with updated CSV with more permissions
+		updateInternalCatalog(t, c, crc, mainCatalogName, testNamespace, []apiextensions.CustomResourceDefinition{mainCRD}, []v1alpha1.ClusterServiceVersion{mainCSV, updatedCSV}, updatedManifests)
+
+		_, err = fetchSubscription(t, crc, testNamespace, subscriptionName, subscriptionHasInstallPlanDifferentChecker(fetchedInstallPlan.GetName()))
+		require.NoError(t, err)
+
+		updatedInstallPlanName := subscription.Status.InstallPlanRef.Name
+
+		// Wait for InstallPlan to be status: Complete before checking resource presence
+		fetchedUpdatedInstallPlan, err := fetchInstallPlan(t, crc, updatedInstallPlanName, buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
+		require.NoError(t, err)
+		require.Equal(t, v1alpha1.InstallPlanPhaseComplete, fetchedUpdatedInstallPlan.Status.Phase)
+
+		// Wait for csv to update
+		_, err = awaitCSV(t, crc, testNamespace, updatedCSV.GetName(), csvSucceededChecker)
+		require.NoError(t, err)
+
+		// Should have created deployment and stomped on the env changes
+		updatedDep, err := c.GetDeployment(testNamespace, deploymentName)
+		require.NoError(t, err)
+		require.NotNil(t, updatedDep)
+
+		// The env var change should have been stomped back to empty
+		var emptyEnv []corev1.EnvVar = nil
+		require.Equal(t, emptyEnv, updatedDep.Spec.Template.Spec.Containers[0].Env)
+	})
+
 	t.Run("UpdateSingleExistingCRDOwner", func(t *testing.T) {
 		defer cleaner.NotifyTestComplete(t, true)