diff --git a/Makefile b/Makefile index 8ddc0acc89b..4fee98832e6 100644 --- a/Makefile +++ b/Makefile @@ -76,7 +76,7 @@ setup-bare: . ./scripts/install_bare.sh $(shell cat ./e2e.namespace) test/e2e/resources e2e: - go test -v -timeout 20m ./test/e2e/... -namespace=default -kubeconfig=${KUBECONFIG} -olmNamespace=openshift-operator-lifecycle-manager + go test -v -timeout 30m ./test/e2e/... -namespace=openshift-operators -kubeconfig=${KUBECONFIG} -olmNamespace=openshift-operator-lifecycle-manager e2e-local: . ./scripts/build_local.sh diff --git a/deploy/chart/templates/0000_30_00-namespace.yaml b/deploy/chart/templates/0000_30_00-namespace.yaml index 6df6c3a4a6a..cd0fa48302f 100644 --- a/deploy/chart/templates/0000_30_00-namespace.yaml +++ b/deploy/chart/templates/0000_30_00-namespace.yaml @@ -4,6 +4,7 @@ metadata: name: {{ .Values.namespace }} labels: openshift.io/run-level: "1" + olm.components: "global" --- apiVersion: v1 kind: Namespace diff --git a/deploy/chart/templates/0000_30_15-operatorgroup-default.yaml b/deploy/chart/templates/0000_30_15-operatorgroup-default.yaml index b43a23a3eba..71afa69b256 100644 --- a/deploy/chart/templates/0000_30_15-operatorgroup-default.yaml +++ b/deploy/chart/templates/0000_30_15-operatorgroup-default.yaml @@ -2,4 +2,14 @@ apiVersion: operators.coreos.com/v1alpha2 kind: OperatorGroup metadata: name: global-operators - namespace: {{ .Values.operator_namespace }} \ No newline at end of file + namespace: {{ .Values.operator_namespace }} +--- +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: olm-operators + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + olm.components: "global" diff --git a/go.mod b/go.mod index 6e330d847fb..26fef96a7de 100644 --- a/go.mod +++ b/go.mod @@ -66,15 +66,15 @@ require ( google.golang.org/grpc v1.16.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 // indirect - k8s.io/api v0.0.0-20180904230853-4e7be11eab3f - k8s.io/apiextensions-apiserver v0.0.0-20180905004947-16750353bf97 - k8s.io/apimachinery v0.0.0-20181126123124-70adfbae261e + k8s.io/api v0.0.0-20181203235848-2dd39edadc55 + k8s.io/apiextensions-apiserver v0.0.0-20181204003618-e419c5771cdc + k8s.io/apimachinery v0.0.0-20181203235515-3d8ee2261517 k8s.io/apiserver v0.0.0-20181026151315-13cfe3978170 k8s.io/client-go v8.0.0+incompatible - k8s.io/code-generator v0.0.0-20180904193909-8c97d6ab64da + k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3 k8s.io/gengo v0.0.0-20181106084056-51747d6e00da // indirect k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92 // indirect - k8s.io/kube-aggregator v0.0.0-20181201191901-6466d3c7f6e4 + k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429 k8s.io/kube-openapi v0.0.0-20181031203759-72693cb1fadd - k8s.io/kubernetes v1.11.6-beta.0.0.20181126160157-5933b9771b71 + k8s.io/kubernetes v1.11.6-beta.0.0.20181207014600-4600add36de5 ) diff --git a/go.sum b/go.sum index 6b133eb42f2..29447afaa31 100644 --- a/go.sum +++ b/go.sum @@ -210,27 +210,25 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20180904230853-4e7be11eab3f h1:DLRkv8Ps4Sdx8Srj+UtGisj4whV7v/HezlHx6QqiZqE= -k8s.io/api 
v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20180905004947-16750353bf97 h1:s4lWWs6JN5kWVzk5bztddkr5kgO/cGIbqTDP+QttUeQ= -k8s.io/apiextensions-apiserver v0.0.0-20180905004947-16750353bf97/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20181126123124-70adfbae261e h1:aoIs4wbKOxWFPPOoMSWC9V4b5+kp7c70tAD8LLByFQY= -k8s.io/apimachinery v0.0.0-20181126123124-70adfbae261e/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/api v0.0.0-20181203235848-2dd39edadc55 h1:FmAMYGd999iHkN+swot+oART9AumJiAvH0idpIZ3Ozo= +k8s.io/api v0.0.0-20181203235848-2dd39edadc55/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20181204003618-e419c5771cdc h1:IOukeE9HtTwpLslbujLDfRpfFU6tsjq28yO0fjnl/hk= +k8s.io/apiextensions-apiserver v0.0.0-20181204003618-e419c5771cdc/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20181203235515-3d8ee2261517 h1:p6GEgV1/cc7H0AT6XfjHwHNIypirOprIB09oKp2DQ/M= +k8s.io/apimachinery v0.0.0-20181203235515-3d8ee2261517/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apiserver v0.0.0-20181026151315-13cfe3978170 h1:CqI85nZvPaV+7JFono0nAOGOx2brocqefcOhDPVhHKI= k8s.io/apiserver v0.0.0-20181026151315-13cfe3978170/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= -k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7HfitmxC4= +k8s.io/client-go v8.0.0+incompatible h1:2pUaSg2x6iEHr8cia6zmWhoCXG1EDG9TCx9s//Aq7HY= k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/code-generator v0.0.0-20180904193909-8c97d6ab64da h1:L6YB6ObZIbZlYikTQcCjzZGilwS3OVyQBA2esULs8VM= -k8s.io/code-generator v0.0.0-20180904193909-8c97d6ab64da/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= +k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3 h1:f/Aa24HPnPEDWia884BCF94E1b29KYjOTVTHcBzvT2Q= +k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= k8s.io/gengo v0.0.0-20181106084056-51747d6e00da h1:ZMvcXtMVbhUCtCuiSEzBV+Eur4swzfdxx6ZyX3qT6dk= k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92 h1:PgoMI/L1Nu5Vmvgm+vGheLuxKST8h6FMOqggyAFtHPc= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-aggregator v0.0.0-20180905000155-efa32eb095fe h1:LM48rywzVEPRg+Os2oUL9/vsztPQGoxmiD3m5VySchw= -k8s.io/kube-aggregator v0.0.0-20180905000155-efa32eb095fe/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= -k8s.io/kube-aggregator v0.0.0-20181201191901-6466d3c7f6e4 h1:fPcE8vd9K1N42jDQEFOdf8phjgi6d+zeIb2Ya7qNFuM= -k8s.io/kube-aggregator v0.0.0-20181201191901-6466d3c7f6e4/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= +k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429 h1:wIDPKpRuwEfyt+ImBaP6wSEZeAR5gYJl/Mlg74L0hHI= +k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= k8s.io/kube-openapi v0.0.0-20181031203759-72693cb1fadd h1:ggv/Vfza0i5xuhUZyYyxcc25AmQvHY8Zi1C2m8WgBvA= k8s.io/kube-openapi v0.0.0-20181031203759-72693cb1fadd/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kubernetes v1.11.6-beta.0.0.20181126160157-5933b9771b71 h1:ZiDzUVY+KNDO1sbcG0hHZokQsNIhjCCCsy06Z4Ck4JA= -k8s.io/kubernetes v1.11.6-beta.0.0.20181126160157-5933b9771b71/go.mod 
h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/kubernetes v1.11.6-beta.0.0.20181207014600-4600add36de5 h1:+oToqFCSumcTuKDfnGBfGDTufqPJK3JmI0+ItcYB2tg= +k8s.io/kubernetes v1.11.6-beta.0.0.20181207014600-4600add36de5/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= diff --git a/manifests/0000_30_15-operatorgroup-default.yaml b/manifests/0000_30_15-operatorgroup-default.yaml index 282ff755382..4f7e01474ab 100644 --- a/manifests/0000_30_15-operatorgroup-default.yaml +++ b/manifests/0000_30_15-operatorgroup-default.yaml @@ -4,4 +4,10 @@ apiVersion: operators.coreos.com/v1alpha2 kind: OperatorGroup metadata: name: global-operators - namespace: openshift-operators \ No newline at end of file + namespace: openshift-operators +--- +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: olm-operators + namespace: openshift-operator-lifecycle-manager diff --git a/pkg/api/apis/operators/v1alpha1/clusterserviceversion_types.go b/pkg/api/apis/operators/v1alpha1/clusterserviceversion_types.go index 7d343bf9306..060f3f483d6 100644 --- a/pkg/api/apis/operators/v1alpha1/clusterserviceversion_types.go +++ b/pkg/api/apis/operators/v1alpha1/clusterserviceversion_types.go @@ -177,6 +177,8 @@ const ( CSVPhaseReplacing ClusterServiceVersionPhase = "Replacing" // CSVPhaseDeleting means that a CSV has been replaced by a new one and will be checked for safety before being deleted CSVPhaseDeleting ClusterServiceVersionPhase = "Deleting" + // CSVPhaseAny matches all other phases in CSV queries + CSVPhaseAny ClusterServiceVersionPhase = "" ) // ConditionReason is a camelcased reason for the state transition diff --git a/pkg/controller/install/deployment_test.go b/pkg/controller/install/deployment_test.go index 597eade90c3..78ac20f2c45 100644 --- a/pkg/controller/install/deployment_test.go +++ b/pkg/controller/install/deployment_test.go @@ -16,11 +16,6 @@ import ( "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" ) -var ( - Controller = false - BlockOwnerDeletion = false -) - func testDeployment(name, namespace string, mockOwner ownerutil.Owner) appsv1.Deployment { testDeploymentLabels := map[string]string{"olm.owner": mockOwner.GetName(), "olm.owner.namespace": mockOwner.GetNamespace()} @@ -34,8 +29,8 @@ func testDeployment(name, namespace string, mockOwner ownerutil.Owner) appsv1.De Kind: v1alpha1.ClusterServiceVersionKind, Name: mockOwner.GetName(), UID: mockOwner.GetUID(), - Controller: &Controller, - BlockOwnerDeletion: &BlockOwnerDeletion, + Controller: &ownerutil.NotController, + BlockOwnerDeletion: &ownerutil.DontBlockOwnerDeletion, }, }, Labels: testDeploymentLabels, @@ -53,8 +48,8 @@ func testServiceAccount(name string, mockOwner ownerutil.Owner) *corev1.ServiceA Kind: v1alpha1.ClusterServiceVersionKind, Name: mockOwner.GetName(), UID: mockOwner.GetUID(), - Controller: &Controller, - BlockOwnerDeletion: &BlockOwnerDeletion, + Controller: &ownerutil.NotController, + BlockOwnerDeletion: &ownerutil.DontBlockOwnerDeletion, }, }) return serviceAccount @@ -102,8 +97,8 @@ func TestInstallStrategyDeploymentInstallDeployments(t *testing.T) { Kind: v1alpha1.ClusterServiceVersionKind, Name: mockOwner.GetName(), UID: mockOwner.UID, - Controller: &Controller, - BlockOwnerDeletion: &BlockOwnerDeletion, + Controller: &ownerutil.NotController, + BlockOwnerDeletion: &ownerutil.DontBlockOwnerDeletion, }} ) @@ -236,7 +231,8 @@ func TestInstallStrategyDeploymentInstallDeployments(t *testing.T) { fakeClient.CreateDeploymentReturns(nil, m.returnError) defer func(i 
int, expectedDeployment appsv1.Deployment) { dep := fakeClient.CreateOrUpdateDeploymentArgsForCall(i) - assert.Equal(t, expectedDeployment, *dep) + expectedDeployment.Spec.Template.Annotations = map[string]string{} + require.Equal(t, expectedDeployment.OwnerReferences, dep.OwnerReferences) }(i, m.expectedDeployment) } diff --git a/pkg/controller/operators/catalog/operator_test.go b/pkg/controller/operators/catalog/operator_test.go index 93ea975ccf0..d7061ce4cdf 100644 --- a/pkg/controller/operators/catalog/operator_test.go +++ b/pkg/controller/operators/catalog/operator_test.go @@ -2,9 +2,10 @@ package catalog import ( "errors" - "github.com/sirupsen/logrus" "testing" + "github.com/sirupsen/logrus" + "github.com/ghodss/yaml" "github.com/stretchr/testify/require" diff --git a/pkg/controller/operators/olm/operator.go b/pkg/controller/operators/olm/operator.go index c453d4ce3c8..208e8ace827 100644 --- a/pkg/controller/operators/olm/operator.go +++ b/pkg/controller/operators/olm/operator.go @@ -21,6 +21,7 @@ import ( kagg "k8s.io/kube-aggregator/pkg/client/informers/externalversions" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha2" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/informers/externalversions" "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/certs" @@ -403,7 +404,31 @@ func (a *Operator) syncClusterServiceVersion(obj interface{}) (syncError error) "phase": clusterServiceVersion.Status.Phase, }) - operatorNamespace, ok := clusterServiceVersion.GetAnnotations()["olm.operatorNamespace"] + operatorGroup := a.operatorGroupForActiveCSV(logger, clusterServiceVersion) + + // don't process CSVs that are not active in an OperatorGroup + if operatorGroup == nil { + opgroups, err := a.lister.OperatorsV1alpha2().OperatorGroupLister().OperatorGroups(clusterServiceVersion.GetNamespace()).List(labels.Everything()) + if err != nil { + // TODO: write out error status + logger.Warn("csv created in namespace without operator group, will not be processed") + } + if len(opgroups) == 1 { + a.addOperatorGroupAnnotations(&clusterServiceVersion.ObjectMeta, opgroups[0]) + _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(clusterServiceVersion.GetNamespace()).Update(clusterServiceVersion) + if err != nil { + logger.WithField("opgroup", opgroups[0].GetName()).Error("error adding operatorgroup annotation") + } + return + } + if len(opgroups) > 1 { + logger.Warn("csv created in namespace with multiple operatorgroups, can't pick one automatically") + } + return + } + + operatorNamespace, ok := clusterServiceVersion.GetAnnotations()[operatorGroupNamespaceAnnotationKey] + if clusterServiceVersion.Status.Reason == v1alpha1.CSVReasonCopied || ok && clusterServiceVersion.GetNamespace() != operatorNamespace { logger.Info("skip sync of dummy CSV") @@ -420,7 +445,7 @@ func (a *Operator) syncClusterServiceVersion(obj interface{}) (syncError error) } // Update CSV with status of transition. Log errors if we can't write them to the status. 
- _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(clusterServiceVersion.GetNamespace()).UpdateStatus(outCSV) + updatedCSV, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(clusterServiceVersion.GetNamespace()).UpdateStatus(outCSV) if err != nil { updateErr := errors.New("error updating ClusterServiceVersion status: " + err.Error()) if syncError == nil { @@ -429,9 +454,61 @@ func (a *Operator) syncClusterServiceVersion(obj interface{}) (syncError error) } syncError = fmt.Errorf("error transitioning ClusterServiceVersion: %s and error updating CSV status: %s", syncError, updateErr) } + + // Check if we need to do any copying / annotation for the operatorgroup + if err := a.copyCsvToTargetNamespace(updatedCSV, operatorGroup); err != nil { + logger.WithError(err).Info("couldn't copy CSV to target namespaces") + } + + if err := a.ensureRBACInTargetNamespace(updatedCSV, operatorGroup); err != nil { + logger.WithError(err).Info("couldn't ensure RBAC in target namespaces") + } + return } +// operatorGroupForCSV returns the operatorgroup for the CSV only if the CSV is active one in the group +func (a *Operator) operatorGroupForActiveCSV(logger *logrus.Entry, csv *v1alpha1.ClusterServiceVersion) *v1alpha2.OperatorGroup { + annotations := csv.GetAnnotations() + + // not part of a group yet + if annotations == nil { + logger.Info("not part of any operatorgroup, no annotations") + return nil + } + + // not in the operatorgroup namespace + if annotations[operatorGroupNamespaceAnnotationKey] != csv.GetNamespace() { + logger.Info("not in operatorgroup namespace, skipping") + return nil + } + + operatorGroupName, ok := annotations[operatorGroupAnnotationKey] + + // no operatorgroup annotation + if !ok { + logger.Info("no operatorgroup annotation") + return nil + } + + logger = logger.WithField("operatorgroup", operatorGroupName) + + operatorGroup, err := a.lister.OperatorsV1alpha2().OperatorGroupLister().OperatorGroups(csv.GetNamespace()).Get(operatorGroupName) + // operatorgroup not found + if err != nil { + logger.Info("operatorgroup not found") + return nil + } + + // target namespaces don't match + if annotations[operatorGroupTargetsAnnotationKey] != strings.Join(operatorGroup.Status.Namespaces, ",") { + logger.Info("target namespace annotation doesn't match operatorgroup namespace list") + return nil + } + + return operatorGroup +} + // transitionCSVState moves the CSV status state machine along based on the current value and the current cluster state. 
func (a *Operator) transitionCSVState(in v1alpha1.ClusterServiceVersion) (out *v1alpha1.ClusterServiceVersion, syncError error) { logger := a.Log.WithFields(logrus.Fields{ @@ -441,6 +518,7 @@ func (a *Operator) transitionCSVState(in v1alpha1.ClusterServiceVersion) (out *v }) out = in.DeepCopy() + now := timeNow() // check if the current CSV is being replaced, return with replacing status if so if err := a.checkReplacementsAndUpdateStatus(out); err != nil { @@ -448,11 +526,9 @@ func (a *Operator) transitionCSVState(in v1alpha1.ClusterServiceVersion) (out *v return } - now := timeNow() - switch out.Status.Phase { case v1alpha1.CSVPhaseNone: - logger.Infof("scheduling ClusterServiceVersion for requirement verification") + logger.Info("scheduling ClusterServiceVersion for requirement verification") out.SetPhaseWithEvent(v1alpha1.CSVPhasePending, v1alpha1.CSVReasonRequirementsUnknown, "requirements not yet checked", now, a.recorder) case v1alpha1.CSVPhasePending: met, statuses, err := a.requirementAndPermissionStatus(out) @@ -472,7 +548,7 @@ func (a *Operator) transitionCSVState(in v1alpha1.ClusterServiceVersion) (out *v } // Check for CRD ownership conflicts - csvSet := a.csvSet(out.GetNamespace()) + csvSet := a.csvSet(out.GetNamespace(), v1alpha1.CSVPhaseAny) if syncError = a.crdOwnerConflicts(out, csvSet); syncError != nil { out.SetPhaseWithEvent(v1alpha1.CSVPhaseFailed, v1alpha1.CSVReasonOwnerConflict, fmt.Sprintf("crd owner conflict: %s", syncError), now, a.recorder) return @@ -554,6 +630,7 @@ func (a *Operator) transitionCSVState(in v1alpha1.ClusterServiceVersion) (out *v out.SetPhase(v1alpha1.CSVPhasePending, v1alpha1.CSVReasonNeedsCertRotation, "owned APIServices need cert refresh", now) return } + case v1alpha1.CSVPhaseFailed: installer, strategy, _ := a.parseStrategiesAndUpdateStatus(out) if strategy == nil { @@ -626,7 +703,7 @@ func (a *Operator) transitionCSVState(in v1alpha1.ClusterServiceVersion) (out *v // findIntermediatesForDeletion starts at csv and follows the replacement chain until one is running and active func (a *Operator) findIntermediatesForDeletion(csv *v1alpha1.ClusterServiceVersion) (csvs []*v1alpha1.ClusterServiceVersion) { - csvsInNamespace := a.csvSet(csv.GetNamespace()) + csvsInNamespace := a.csvSet(csv.GetNamespace(), v1alpha1.CSVPhaseAny) current := csv // isBeingReplaced returns a copy @@ -654,7 +731,7 @@ func (a *Operator) findIntermediatesForDeletion(csv *v1alpha1.ClusterServiceVers } // csvSet gathers all CSVs in the given namespace into a map keyed by CSV name; if metav1.NamespaceAll gets the set across all namespaces -func (a *Operator) csvSet(namespace string) map[string]*v1alpha1.ClusterServiceVersion { +func (a *Operator) csvSet(namespace string, phase v1alpha1.ClusterServiceVersionPhase) map[string]*v1alpha1.ClusterServiceVersion { csvsInNamespace, err := a.lister.OperatorsV1alpha1().ClusterServiceVersionLister().ClusterServiceVersions(namespace).List(labels.Everything()) if err != nil { @@ -664,6 +741,9 @@ func (a *Operator) csvSet(namespace string) map[string]*v1alpha1.ClusterServiceV csvs := make(map[string]*v1alpha1.ClusterServiceVersion, len(csvsInNamespace)) for _, csv := range csvsInNamespace { + if phase != v1alpha1.CSVPhaseAny && csv.Status.Phase != phase { + continue + } csvs[csv.Name] = csv.DeepCopy() } return csvs @@ -674,7 +754,7 @@ func (a *Operator) checkReplacementsAndUpdateStatus(csv *v1alpha1.ClusterService if csv.Status.Phase == v1alpha1.CSVPhaseReplacing || csv.Status.Phase == v1alpha1.CSVPhaseDeleting { return nil } - if 
replacement := a.isBeingReplaced(csv, a.csvSet(csv.GetNamespace())); replacement != nil { + if replacement := a.isBeingReplaced(csv, a.csvSet(csv.GetNamespace(), v1alpha1.CSVPhaseAny)); replacement != nil { a.Log.Infof("newer ClusterServiceVersion replacing %s, no-op", csv.SelfLink) msg := fmt.Sprintf("being replaced by csv: %s", replacement.SelfLink) csv.SetPhase(v1alpha1.CSVPhaseReplacing, v1alpha1.CSVReasonBeingReplaced, msg, timeNow()) diff --git a/pkg/controller/operators/olm/operator_test.go b/pkg/controller/operators/olm/operator_test.go index 0c265fa40c1..21babfa7734 100644 --- a/pkg/controller/operators/olm/operator_test.go +++ b/pkg/controller/operators/olm/operator_test.go @@ -391,6 +391,17 @@ func withAnnotations(obj runtime.Object, annotations map[string]string) runtime. return meta.(runtime.Object) } +func addAnnotations(annotations map[string]string, add map[string]string) map[string]string { + out := map[string]string{} + for k, v := range annotations { + out[k] = v + } + for k, v := range add { + out[k] = v + } + return out +} + func installStrategy(deploymentName string, permissions []install.StrategyDeploymentPermissions, clusterPermissions []install.StrategyDeploymentPermissions) v1alpha1.NamedInstallStrategy { var singleInstance = int32(1) strategy := install.StrategyDetailsDeployment{ @@ -607,6 +618,27 @@ func TestTransitionCSV(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) namespace := "ns" + operatorGroup := &v1alpha2.OperatorGroup{ + TypeMeta: metav1.TypeMeta{ + Kind: "OperatorGroup", + APIVersion: v1alpha2.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: namespace, + }, + Spec: v1alpha2.OperatorGroupSpec{}, + Status: v1alpha2.OperatorGroupStatus{ + Namespaces: []string{namespace}, + }, + } + + templateAnnotations := map[string]string{ + operatorGroupTargetsAnnotationKey: namespace, + operatorGroupNamespaceAnnotationKey: namespace, + operatorGroupAnnotationKey: operatorGroup.GetName(), + } + // Generate valid and expired CA fixtures validCA, err := generateCA(time.Now().Add(10*365*24*time.Hour), Organization) require.NoError(t, err) @@ -893,7 +925,7 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -929,9 +961,9 @@ func TestTransitionCSV(t *testing.T) { }, apis: []runtime.Object{apiService("a1", "v1", "v1-a1", namespace, "", validCAPEM, apiregistrationv1.ConditionTrue)}, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: validCAHash, - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), validCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: validCAHash, }), @@ -1017,7 +1049,7 @@ func TestTransitionCSV(t *testing.T) { ), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", nil), + deployment("a1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1040,7 +1072,7 @@ func TestTransitionCSV(t *testing.T) { ), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", nil), + deployment("a1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1156,9 +1188,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", 
validCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: validCAHash, - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), validCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: validCAHash, }), @@ -1223,9 +1255,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", validCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: "a-pretty-bad-hash", - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), validCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: validCAHash, }), @@ -1290,9 +1322,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", validCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: validCAHash, - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), validCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: "also-a-pretty-bad-hash", }), @@ -1357,9 +1389,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", validCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: "a-pretty-bad-hash", - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), validCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: "also-a-pretty-bad-hash", }), @@ -1424,9 +1456,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", []byte("a-bad-ca"), apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: validCAHash, - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), validCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: validCAHash, }), @@ -1491,9 +1523,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", validCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: validCAHash, - }), + })), withAnnotations(tlsSecret("v1.a1-cert", namespace, []byte("bad-cert"), []byte("bad-key")), map[string]string{ OLMCAHashAnnotationKey: validCAHash, }), @@ -1558,9 +1590,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, 
"a1", expiredCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: expiredCAHash, - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), expiredCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: expiredCAHash, }), @@ -1625,9 +1657,9 @@ func TestTransitionCSV(t *testing.T) { apiService("a1", "v1", "v1-a1", namespace, "a1", expiredCAPEM, apiregistrationv1.ConditionTrue), }, objs: []runtime.Object{ - deployment("a1", namespace, "sa", map[string]string{ + deployment("a1", namespace, "sa", addAnnotations(templateAnnotations, map[string]string{ OLMCAHashAnnotationKey: expiredCAHash, - }), + })), withAnnotations(keyPairToTLSSecret("v1.a1-cert", namespace, signedServingPair(time.Now().Add(24*time.Hour), expiredCA, []string{"v1-a1.ns", "v1-a1.ns.svc"})), map[string]string{ OLMCAHashAnnotationKey: expiredCAHash, }), @@ -1715,7 +1747,7 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1769,7 +1801,7 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1804,8 +1836,8 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), - deployment("csv2-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), + deployment("csv2-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1840,8 +1872,8 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), - deployment("csv2-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), + deployment("csv2-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1885,9 +1917,9 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), - deployment("csv2-dep1", namespace, "sa", nil), - deployment("csv3-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), + deployment("csv2-dep1", namespace, "sa", templateAnnotations), + deployment("csv3-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1931,9 +1963,9 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv1-dep1", namespace, "sa", nil), - deployment("csv2-dep1", namespace, "sa", nil), - deployment("csv3-dep1", namespace, "sa", nil), + deployment("csv1-dep1", namespace, "sa", templateAnnotations), + deployment("csv2-dep1", namespace, "sa", templateAnnotations), + deployment("csv3-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -1969,8 +2001,8 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv2-dep1", namespace, "sa", nil), - deployment("csv3-dep1", namespace, "sa", nil), + deployment("csv2-dep1", namespace, "sa", templateAnnotations), + deployment("csv3-dep1", 
namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -2006,8 +2038,8 @@ func TestTransitionCSV(t *testing.T) { crd("c1", "v1"), }, objs: []runtime.Object{ - deployment("csv2-dep1", namespace, "sa", nil), - deployment("csv3-dep1", namespace, "sa", nil), + deployment("csv2-dep1", namespace, "sa", templateAnnotations), + deployment("csv3-dep1", namespace, "sa", templateAnnotations), }, }, expected: expected{ @@ -2023,13 +2055,21 @@ func TestTransitionCSV(t *testing.T) { // configure cluster state namespaceObj := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} tt.initial.objs = append(tt.initial.objs, namespaceObj) + + // put csvs in operatorgroup + csvs := make([]runtime.Object, 0) + for _, csv := range tt.initial.csvs { + csvs = append(csvs, withAnnotations(csv, templateAnnotations)) + } + clientObjs := append(csvs, operatorGroup) + stopCh := make(chan struct{}) defer func() { stopCh <- struct{}{} }() - op, hasSyncedFns, err := NewFakeOperator(tt.initial.csvs, tt.initial.objs, tt.initial.crds, tt.initial.apis, &install.StrategyResolver{}, []string{namespace}, stopCh) + op, hasSyncedFns, err := NewFakeOperator(clientObjs, tt.initial.objs, tt.initial.crds, tt.initial.apis, &install.StrategyResolver{}, []string{namespace}, stopCh) require.NoError(t, err) // run csv sync for each CSV - for _, csv := range tt.initial.csvs { + for _, csv := range csvs { err := op.syncClusterServiceVersion(csv) expectedErr := tt.expected.err[csv.(*v1alpha1.ClusterServiceVersion).Name] require.Equal(t, expectedErr, err) @@ -2050,11 +2090,11 @@ func TestTransitionCSV(t *testing.T) { // verify expectations of csvs in cluster for csvName, csvState := range tt.expected.csvStates { csv, ok := outCSVMap[csvName] - assert.Equal(t, ok, csvState.exists, "%s existence should be %t", csvName, csvState.exists) + require.Equal(t, ok, csvState.exists, "%s existence should be %t", csvName, csvState.exists) if csvState.exists { - assert.Equal(t, csvState.phase, csv.Status.Phase, "%s had incorrect phase", csvName) + require.EqualValues(t, string(csvState.phase), string(csv.Status.Phase), "%s had incorrect phase", csvName) if csvState.reason != "" { - assert.Equal(t, csvState.reason, csv.Status.Reason, "%s had incorrect condition reason", csvName) + require.EqualValues(t, string(csvState.reason), string(csv.Status.Reason), "%s had incorrect condition reason", csvName) } } } @@ -2114,11 +2154,12 @@ func TestSyncOperatorGroups(t *testing.T) { installStrategy("csv1-dep1", permissions, nil), []*v1beta1.CustomResourceDefinition{crd}, []*v1beta1.CustomResourceDefinition{}, - v1alpha1.CSVPhaseSucceeded, + v1alpha1.CSVPhaseNone, ) // after state transitions from operatorgroups, this is the operator csv we expect operatorCSVFinal := operatorCSV.DeepCopy() + operatorCSVFinal.Status.Phase = v1alpha1.CSVPhaseSucceeded operatorCSVFinal.Status.Message = "install strategy completed with no errors" operatorCSVFinal.Status.Reason = v1alpha1.CSVReasonInstallSuccessful operatorCSVFinal.Status.LastUpdateTime = timeNow() @@ -2149,17 +2190,10 @@ func TestSyncOperatorGroups(t *testing.T) { }, } operatorCSVFinal.Status.Conditions = []v1alpha1.ClusterServiceVersionCondition{ - { - Phase: v1alpha1.CSVPhaseFailed, - Reason: v1alpha1.CSVReasonComponentUnhealthy, - Message: "installing: AnnotationsMissing: no annotations found on deployment", - LastUpdateTime: timeNow(), - LastTransitionTime: timeNow(), - }, { Phase: v1alpha1.CSVPhasePending, - Reason: v1alpha1.CSVReasonNeedsReinstall, - Message: "installing: AnnotationsMissing: 
no annotations found on deployment", + Reason: v1alpha1.CSVReasonRequirementsUnknown, + Message: "requirements not yet checked", LastUpdateTime: timeNow(), LastTransitionTime: timeNow(), }, @@ -2186,15 +2220,8 @@ func TestSyncOperatorGroups(t *testing.T) { }, } - // everything the same, but in target namespace, and copied status reason - targetCSV := csv("csv1", - targetNamespace, - "", - installStrategy("csv1-dep1", permissions, nil), - []*v1beta1.CustomResourceDefinition{crd}, - []*v1beta1.CustomResourceDefinition{}, - v1alpha1.CSVPhaseSucceeded, - ) + targetCSV := operatorCSVFinal.DeepCopy() + targetCSV.SetNamespace(targetNamespace) targetCSV.Status.Reason = v1alpha1.CSVReasonCopied targetCSV.Status.Message = "The operator is running in operator-ns but is managing this namespace" targetCSV.Status.LastUpdateTime = timeNow() @@ -2537,6 +2564,9 @@ func TestSyncOperatorGroups(t *testing.T) { require.NoError(t, err) for _, obj := range opGroupCSVs.Items { + ok := cache.WaitForCacheSync(stopCh, hasSyncedFns...) + require.True(t, ok, "wait for cache sync failed") + err = op.syncClusterServiceVersion(&obj) require.NoError(t, err, "%#v", obj) } @@ -2567,7 +2597,7 @@ func TestSyncOperatorGroups(t *testing.T) { require.Failf(t, "couldn't find expected object", "%#v", object) } require.NoError(t, err, "couldn't fetch %s %v", namespace, object) - require.Equal(t, object, fetched) + require.Equal(t, object, fetched, "%s in %s not equal", object.GetObjectKind().GroupVersionKind().String(), namespace) } } }) diff --git a/pkg/controller/operators/olm/operatorgroup.go b/pkg/controller/operators/olm/operatorgroup.go index 7dc028a1e31..db0e7a03b5b 100644 --- a/pkg/controller/operators/olm/operatorgroup.go +++ b/pkg/controller/operators/olm/operatorgroup.go @@ -5,7 +5,6 @@ import ( "reflect" "strings" - log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -18,63 +17,55 @@ import ( "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" ) +const ( + operatorGroupAnnotationKey = "olm.operatorGroup" + operatorGroupNamespaceAnnotationKey = "olm.operatorNamespace" + operatorGroupTargetsAnnotationKey = "olm.targetNamespaces" +) + func (a *Operator) syncOperatorGroups(obj interface{}) error { op, ok := obj.(*v1alpha2.OperatorGroup) if !ok { - log.Debugf("wrong type: %#v\n", obj) + a.Log.Debugf("wrong type: %#v\n", obj) return fmt.Errorf("casting OperatorGroup failed") } targetedNamespaces, err := a.updateNamespaceList(op) - log.Debugf("Got targetedNamespaces: '%v'", targetedNamespaces) + a.Log.Debugf("Got targetedNamespaces: '%v'", targetedNamespaces) if err != nil { - log.Errorf("updateNamespaceList error: %v", err) + a.Log.Errorf("updateNamespaceList error: %v", err) return err } if err := a.ensureClusterRoles(op); err != nil { - log.Errorf("ensureClusterRoles error: %v", err) + a.Log.Errorf("ensureClusterRoles error: %v", err) return err } - log.Debug("Cluster roles completed") - - nsListJoined := strings.Join(targetedNamespaces, ",") + a.Log.Debug("Cluster roles completed") - // annotate csvs - csvsInNamespace := a.csvSet(op.Namespace) - for _, csv := range csvsInNamespace { + for _, csv := range a.csvSet(op.Namespace, v1alpha1.CSVPhaseSucceeded) { origCSVannotations := csv.GetAnnotations() - a.addAnnotationsToObjectMeta(&csv.ObjectMeta, op, nsListJoined) + a.addOperatorGroupAnnotations(&csv.ObjectMeta, op) if reflect.DeepEqual(origCSVannotations, csv.GetAnnotations()) == false { // CRDs don't support 
strategic merge patching, but in the future if they do this should be updated to patch if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(csv.GetNamespace()).Update(csv); err != nil { - log.Errorf("Update for existing CSV failed: %v", err) + a.Log.Errorf("Update for existing CSV failed: %v", err) } } } - for _, csv := range csvsInNamespace { - if err := a.copyCsvToTargetNamespace(csv, op, targetedNamespaces); err != nil { - return err - } - } - - for _, csv := range csvsInNamespace { - if err := a.ensureRBACInTargetNamespace(csv, op, targetedNamespaces); err != nil { - return err - } - } - log.Debug("CSV annotation completed") + a.Log.Debug("CSV annotation completed") return nil } -func (a *Operator) ensureRBACInTargetNamespace(csv *v1alpha1.ClusterServiceVersion, operatorGroup *v1alpha2.OperatorGroup, targetNamespaces []string) error { +func (a *Operator) ensureRBACInTargetNamespace(csv *v1alpha1.ClusterServiceVersion, operatorGroup *v1alpha2.OperatorGroup) error { opPerms, err := resolver.RBACForClusterServiceVersion(csv) if err != nil { return err } + targetNamespaces := operatorGroup.Status.Namespaces if targetNamespaces == nil { return nil } @@ -217,66 +208,90 @@ func (a *Operator) ensureTenantRBAC(operatorNamespace, targetNamespace string, c return nil } -func (a *Operator) copyCsvToTargetNamespace(csv *v1alpha1.ClusterServiceVersion, operatorGroup *v1alpha2.OperatorGroup, targetNamespaces []string) error { - namespaces := targetNamespaces - if len(targetNamespaces) == 1 && targetNamespaces[0] == corev1.NamespaceAll { +func (a *Operator) copyCsvToTargetNamespace(csv *v1alpha1.ClusterServiceVersion, operatorGroup *v1alpha2.OperatorGroup) error { + namespaces := make([]string, 0) + if len(operatorGroup.Status.Namespaces) == 1 && operatorGroup.Status.Namespaces[0] == corev1.NamespaceAll { namespaceObjs, err := a.lister.CoreV1().NamespaceLister().List(labels.Everything()) if err != nil { return err } - namespaces = []string{} for _, ns := range namespaceObjs { namespaces = append(namespaces, ns.GetName()) } + } else { + namespaces = operatorGroup.Status.Namespaces } + logger := a.Log.WithField("operator-ns", operatorGroup.GetNamespace()) for _, ns := range namespaces { if ns == operatorGroup.GetNamespace() { continue } + logger = logger.WithField("target-ns", ns) + fetchedCSV, err := a.lister.OperatorsV1alpha1().ClusterServiceVersionLister().ClusterServiceVersions(ns).Get(csv.GetName()) - if k8serrors.IsAlreadyExists(err) { - log.Debugf("Found existing CSV (%v), checking annotations", fetchedCSV.GetName()) - if reflect.DeepEqual(fetchedCSV.Annotations, csv.Annotations) == false { + + logger = logger.WithField("csv", csv.GetName()) + if fetchedCSV != nil { + logger.Debug("checking annotations") + if !reflect.DeepEqual(fetchedCSV.Annotations, csv.Annotations) { fetchedCSV.Annotations = csv.Annotations - // CRDs don't support strategic merge patching, but in the future if they do this should be updated to patch - log.Debugf("Updating CSV %v in namespace %v", fetchedCSV.GetName(), ns) + // CRs don't support strategic merge patching, but in the future if they do this should be updated to patch + logger.Debug("updating target CSV") if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(ns).Update(fetchedCSV); err != nil { - log.Errorf("Update CSV in target namespace failed: %v", err) + logger.WithError(err).Error("update target CSV failed") return err } } + + logger.Debug("checking status") + newCSV := fetchedCSV.DeepCopy() + newCSV.Status = csv.Status + 
newCSV.Status.Reason = v1alpha1.CSVReasonCopied + newCSV.Status.Message = fmt.Sprintf("The operator is running in %s but is managing this namespace", csv.GetNamespace()) + + if !reflect.DeepEqual(fetchedCSV.Status, newCSV.Status) { + logger.Debug("updating status") + newCSV.SetNamespace(ns) + newCSV.Status.LastUpdateTime = timeNow() + if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(ns).UpdateStatus(newCSV); err != nil { + logger.WithError(err).Error("status update for target CSV failed") + return err + } + } + continue } else if k8serrors.IsNotFound(err) { newCSV := csv.DeepCopy() newCSV.SetNamespace(ns) newCSV.SetResourceVersion("") - log.Debugf("Copying CSV %v to namespace %v", csv.GetName(), ns) + logger.Debug("copying CSV") createdCSV, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(ns).Create(newCSV) if err != nil { - log.Errorf("Create for new CSV failed: %v", err) + a.Log.Errorf("Create for new CSV failed: %v", err) return err } createdCSV.Status.Reason = v1alpha1.CSVReasonCopied createdCSV.Status.Message = fmt.Sprintf("The operator is running in %s but is managing this namespace", csv.GetNamespace()) createdCSV.Status.LastUpdateTime = timeNow() if _, err := a.client.OperatorsV1alpha1().ClusterServiceVersions(ns).UpdateStatus(createdCSV); err != nil { - log.Errorf("Status update for CSV failed: %v", err) + a.Log.Errorf("Status update for CSV failed: %v", err) return err } + } else if err != nil { - log.Errorf("CSV fetch for %v failed: %v", csv.GetName(), err) + logger.WithError(err).Error("couldn't get CSV") return err } } return nil } -func (a *Operator) addAnnotationsToObjectMeta(obj *metav1.ObjectMeta, op *v1alpha2.OperatorGroup, targetNamespaces string) { - metav1.SetMetaDataAnnotation(obj, "olm.targetNamespaces", targetNamespaces) - metav1.SetMetaDataAnnotation(obj, "olm.operatorNamespace", op.GetNamespace()) - metav1.SetMetaDataAnnotation(obj, "olm.operatorGroup", op.GetName()) +func (a *Operator) addOperatorGroupAnnotations(obj *metav1.ObjectMeta, op *v1alpha2.OperatorGroup) { + metav1.SetMetaDataAnnotation(obj, operatorGroupTargetsAnnotationKey, strings.Join(op.Status.Namespaces, ",")) + metav1.SetMetaDataAnnotation(obj, operatorGroupNamespaceAnnotationKey, op.GetNamespace()) + metav1.SetMetaDataAnnotation(obj, operatorGroupAnnotationKey, op.GetName()) } func namespacesChanged(clusterNamespaces []string, statusNamespaces []string) bool { @@ -329,7 +344,7 @@ func (a *Operator) updateNamespaceList(op *v1alpha2.OperatorGroup) ([]string, er // status is current with correct namespaces, so no further updates required return namespaceList, nil } - log.Debugf("Namespace change detected, found: %v", namespaceList) + a.Log.Debugf("Namespace change detected, found: %v", namespaceList) op.Status = v1alpha2.OperatorGroupStatus{ Namespaces: namespaceList, LastUpdated: timeNow(), @@ -343,7 +358,7 @@ func (a *Operator) updateNamespaceList(op *v1alpha2.OperatorGroup) ([]string, er func (a *Operator) ensureClusterRoles(op *v1alpha2.OperatorGroup) error { currentNamespace := op.GetNamespace() - csvsInNamespace := a.csvSet(currentNamespace) + csvsInNamespace := a.csvSet(currentNamespace, v1alpha1.CSVPhaseSucceeded) for _, csv := range csvsInNamespace { managerPolicyRules := []rbacv1.PolicyRule{} apiEditPolicyRules := []rbacv1.PolicyRule{} @@ -373,11 +388,11 @@ func (a *Operator) ensureClusterRoles(op *v1alpha2.OperatorGroup) error { _, err := a.OpClient.KubernetesInterface().RbacV1().ClusterRoles().Create(clusterRole) if k8serrors.IsAlreadyExists(err) { if _, err = 
a.OpClient.UpdateClusterRole(clusterRole); err != nil { - log.Errorf("Update CRD existing cluster role failed: %v", err) + a.Log.Errorf("Update CRD existing cluster role failed: %v", err) return err } } else if err != nil { - log.Errorf("Update CRD cluster role failed: %v", err) + a.Log.Errorf("Update CRD cluster role failed: %v", err) return err } @@ -392,11 +407,11 @@ func (a *Operator) ensureClusterRoles(op *v1alpha2.OperatorGroup) error { _, err = a.OpClient.KubernetesInterface().RbacV1().ClusterRoles().Create(operatorGroupEditClusterRole) if k8serrors.IsAlreadyExists(err) { if _, err = a.OpClient.UpdateClusterRole(operatorGroupEditClusterRole); err != nil { - log.Errorf("Update existing edit cluster role failed: %v", err) + a.Log.Errorf("Update existing edit cluster role failed: %v", err) return err } } else if err != nil { - log.Errorf("Update edit cluster role failed: %v", err) + a.Log.Errorf("Update edit cluster role failed: %v", err) return err } operatorGroupViewClusterRole := &rbacv1.ClusterRole{ @@ -409,11 +424,11 @@ func (a *Operator) ensureClusterRoles(op *v1alpha2.OperatorGroup) error { _, err = a.OpClient.KubernetesInterface().RbacV1().ClusterRoles().Create(operatorGroupViewClusterRole) if k8serrors.IsAlreadyExists(err) { if _, err = a.OpClient.UpdateClusterRole(operatorGroupViewClusterRole); err != nil { - log.Errorf("Update existing view cluster role failed: %v", err) + a.Log.Errorf("Update existing view cluster role failed: %v", err) return err } } else if err != nil { - log.Errorf("Update view cluster role failed: %v", err) + a.Log.Errorf("Update view cluster role failed: %v", err) return err } } diff --git a/pkg/lib/ownerutil/util.go b/pkg/lib/ownerutil/util.go index 9ad242462d2..a60470d1bd8 100644 --- a/pkg/lib/ownerutil/util.go +++ b/pkg/lib/ownerutil/util.go @@ -2,6 +2,7 @@ package ownerutil import ( "fmt" + "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" @@ -12,14 +13,20 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -const OwnerKey = "olm.owner" -const OwnerNamespaceKey = "olm.owner.namespace" +const ( + OwnerKey = "olm.owner" + OwnerNamespaceKey = "olm.owner.namespace" +) + +var ( + NotController = false + DontBlockOwnerDeletion = false +) // Owner is used to build an OwnerReference, and we need type and object metadata type Owner interface { metav1.Object runtime.Object - schema.ObjectKind } func IsOwnedBy(object metav1.Object, owner Owner) bool { @@ -130,10 +137,8 @@ func NonBlockingOwner(owner Owner) metav1.OwnerReference { if err := InferGroupVersionKind(owner); err != nil { log.Warn(err.Error()) } - blockOwnerDeletion := false - isController := false - gvk := owner.GroupVersionKind() + gvk := owner.GetObjectKind().GroupVersionKind() apiVersion, kind := gvk.ToAPIVersionAndKind() return metav1.OwnerReference{ @@ -141,8 +146,8 @@ func NonBlockingOwner(owner Owner) metav1.OwnerReference { Kind: kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: &DontBlockOwnerDeletion, + Controller: &NotController, } } @@ -170,7 +175,7 @@ func AddOwner(object metav1.Object, owner Owner, blockOwnerDeletion, isControlle if ownerRefs == nil { ownerRefs = []metav1.OwnerReference{} } - gvk := owner.GroupVersionKind() + gvk := owner.GetObjectKind().GroupVersionKind() apiVersion, kind := gvk.ToAPIVersionAndKind() ownerRefs = append(ownerRefs, metav1.OwnerReference{ APIVersion: 
apiVersion, diff --git a/test/e2e/e2e-values.yaml b/test/e2e/e2e-values.yaml index f6762a15227..39408c8106f 100644 --- a/test/e2e/e2e-values.yaml +++ b/test/e2e/e2e-values.yaml @@ -7,7 +7,7 @@ olm: pullPolicy: IfNotPresent service: internalPort: 8080 - commandArgs: -test.coverprofile=/tmp/coverage/alm-coverage.cov + commandArgs: -test.coverprofile=/tmp/coverage/alm-coverage.cov -debug catalog: replicaCount: 1 diff --git a/test/e2e/operator_groups_e2e_test.go b/test/e2e/operator_groups_e2e_test.go index 0afb2b43994..dbf94818377 100644 --- a/test/e2e/operator_groups_e2e_test.go +++ b/test/e2e/operator_groups_e2e_test.go @@ -5,6 +5,7 @@ import ( "regexp" "strings" "testing" + "time" "github.com/coreos/go-semver/semver" "github.com/stretchr/testify/require" @@ -15,9 +16,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/apis/rbac" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha2" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" ) @@ -100,11 +105,23 @@ func TestOperatorGroup(t *testing.T) { c := newKubeClient(t) crc := newCRClient(t) - csvName := "another-csv" // must be lowercase for DNS-1123 validation + csvName := genName("another-csv-") // must be lowercase for DNS-1123 validation - matchingLabel := map[string]string{"matchLabel": testNamespace} - otherNamespaceName := testNamespace + "-namespace-two" - bothNamespaceNames := otherNamespaceName + "," + testNamespace + opGroupNamespace := genName(testNamespace + "-") + matchingLabel := map[string]string{"inGroup": opGroupNamespace} + otherNamespaceName := genName(opGroupNamespace + "-") + bothNamespaceNames := otherNamespaceName + "," + opGroupNamespace + + _, err := c.KubernetesInterface().CoreV1().Namespaces().Create(&corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: opGroupNamespace, + }, + }) + require.NoError(t, err) + defer func() { + err = c.KubernetesInterface().CoreV1().Namespaces().Delete(opGroupNamespace, &metav1.DeleteOptions{}) + require.NoError(t, err) + }() otherNamespace := corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -114,6 +131,10 @@ func TestOperatorGroup(t *testing.T) { } createdOtherNamespace, err := c.KubernetesInterface().CoreV1().Namespaces().Create(&otherNamespace) require.NoError(t, err) + defer func() { + err = c.KubernetesInterface().CoreV1().Namespaces().Delete(otherNamespaceName, &metav1.DeleteOptions{}) + require.NoError(t, err) + }() t.Log("Creating CRD") mainCRDPlural := genName("opgroup") @@ -125,37 +146,32 @@ func TestOperatorGroup(t *testing.T) { require.NoError(t, err) defer cleanupCRD() - t.Log("Creating CSV") - aCSV := newCSV(csvName, testNamespace, "", *semver.New("0.0.0"), []extv1beta1.CustomResourceDefinition{mainCRD}, nil, newNginxInstallStrategy("operator-deployment", nil, nil)) - createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(testNamespace).Create(&aCSV) - require.NoError(t, err) - - t.Log("wait for CSV to succeed") - _, err = fetchCSV(t, crc, createdCSV.GetName(), csvSucceededChecker) - require.NoError(t, err) - t.Log("Creating operator group") operatorGroup := v1alpha2.OperatorGroup{ ObjectMeta: metav1.ObjectMeta{ - Name: "e2e-operator-group", - 
Namespace: testNamespace, + Name: genName("e2e-operator-group-"), + Namespace: opGroupNamespace, }, Spec: v1alpha2.OperatorGroupSpec{ Selector: metav1.LabelSelector{ MatchLabels: matchingLabel, }, }, - //ServiceAccountName: "default-sa", } - _, err = crc.OperatorsV1alpha2().OperatorGroups(testNamespace).Create(&operatorGroup) + _, err = crc.OperatorsV1alpha2().OperatorGroups(opGroupNamespace).Create(&operatorGroup) require.NoError(t, err) + defer func() { + err = crc.OperatorsV1alpha2().OperatorGroups(opGroupNamespace).Delete(operatorGroup.Name, &metav1.DeleteOptions{}) + require.NoError(t, err) + }() + expectedOperatorGroupStatus := v1alpha2.OperatorGroupStatus{ - Namespaces: []string{createdOtherNamespace.GetName(), testNamespace}, + Namespaces: []string{createdOtherNamespace.GetName(), opGroupNamespace}, } t.Log("Waiting on operator group to have correct status") err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { - fetched, fetchErr := crc.OperatorsV1alpha2().OperatorGroups(testNamespace).Get(operatorGroup.Name, metav1.GetOptions{}) + fetched, fetchErr := crc.OperatorsV1alpha2().OperatorGroups(opGroupNamespace).Get(operatorGroup.Name, metav1.GetOptions{}) if fetchErr != nil { return false, fetchErr } @@ -166,10 +182,82 @@ func TestOperatorGroup(t *testing.T) { return false, nil }) + t.Log("Creating CSV") + // Generate permissions + serviceAccountName := genName("nginx-sa") + permissions := []install.StrategyDeploymentPermissions{ + { + ServiceAccountName: serviceAccountName, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{rbac.VerbAll}, + APIGroups: []string{apiGroup}, + Resources: []string{mainCRDPlural}, + }, + }, + }, + } + + serviceAccount := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: opGroupNamespace, + Name: serviceAccountName, + }, + } + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: opGroupNamespace, + Name: serviceAccountName + "-role", + }, + Rules: permissions[0].Rules, + } + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: opGroupNamespace, + Name: serviceAccountName + "-rb", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccountName, + Namespace: opGroupNamespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: role.GetName(), + }, + } + _, err = c.CreateServiceAccount(serviceAccount) + require.NoError(t, err) + _, err = c.CreateRole(role) + require.NoError(t, err) + _, err = c.CreateRoleBinding(roleBinding) + require.NoError(t, err) + + // Create a new NamedInstallStrategy + deploymentName := genName("operator-deployment") + namedStrategy := newNginxInstallStrategy(deploymentName, permissions, nil) + + aCSV := newCSV(csvName, opGroupNamespace, "", *semver.New("0.0.0"), []extv1beta1.CustomResourceDefinition{mainCRD}, nil, namedStrategy) + createdCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Create(&aCSV) + require.NoError(t, err) + + t.Log("wait for CSV to succeed") + err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { + fetched, err := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(createdCSV.GetName(), metav1.GetOptions{}) + if err != nil { + return false, err + } + t.Logf("%s (%s): %s", fetched.Status.Phase, fetched.Status.Reason, fetched.Status.Message) + return csvSucceededChecker(fetched), nil + }) + require.NoError(t, err) + t.Log("Waiting for expected operator group test APIGroup from CSV") err = wait.Poll(pollInterval, pollDuration, func() (bool, error) 
{ // (view role is the last role created, so the rest should exist as well by this point) - viewRole, fetchErr := c.KubernetesInterface().RbacV1().ClusterRoles().Get("e2e-operator-group-view", metav1.GetOptions{}) + viewRole, fetchErr := c.KubernetesInterface().RbacV1().ClusterRoles().Get(operatorGroup.Name+"-view", metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { return false, nil @@ -186,17 +274,17 @@ func TestOperatorGroup(t *testing.T) { } return false, nil }) + require.NoError(t, err) t.Log("Checking for proper generated operator group RBAC roles") - editRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get("e2e-operator-group-edit", metav1.GetOptions{}) + editRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(operatorGroup.Name+"-edit", metav1.GetOptions{}) require.NoError(t, err) editPolicyRules := []rbacv1.PolicyRule{ {Verbs: []string{"create", "update", "patch", "delete"}, APIGroups: []string{apiGroup}, Resources: []string{mainCRDPlural}}, } - t.Log(editRole) require.Equal(t, editPolicyRules, editRole.Rules) - viewRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get("e2e-operator-group-view", metav1.GetOptions{}) + viewRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get(operatorGroup.Name+"-view", metav1.GetOptions{}) require.NoError(t, err) viewPolicyRules := []rbacv1.PolicyRule{ {Verbs: []string{"get", "list", "watch"}, APIGroups: []string{apiGroup}, Resources: []string{mainCRDPlural}}, @@ -204,7 +292,7 @@ func TestOperatorGroup(t *testing.T) { t.Log(viewRole) require.Equal(t, viewPolicyRules, viewRole.Rules) - managerRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get("owned-crd-manager-another-csv", metav1.GetOptions{}) + managerRole, err := c.KubernetesInterface().RbacV1().ClusterRoles().Get("owned-crd-manager-"+csvName, metav1.GetOptions{}) require.NoError(t, err) managerPolicyRules := []rbacv1.PolicyRule{ {Verbs: []string{"*"}, APIGroups: []string{apiGroup}, Resources: []string{mainCRDPlural}}, @@ -214,7 +302,7 @@ func TestOperatorGroup(t *testing.T) { t.Log("Waiting for operator namespace csv to have annotations") err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { - fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(testNamespace).Get(csvName, metav1.GetOptions{}) + fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Get(csvName, metav1.GetOptions{}) if fetchErr != nil { t.Log(fetchErr.Error()) return false, fetchErr @@ -241,7 +329,7 @@ func TestOperatorGroup(t *testing.T) { return false, nil }) - // since annotations are set along with status, no reason to poll for this check as done above + t.Log("Checking status on csv in target namespace") err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(csvName, metav1.GetOptions{}) @@ -261,7 +349,7 @@ func TestOperatorGroup(t *testing.T) { t.Log("Waiting on deployment to have correct annotations") err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { - createdDeployment, err := c.GetDeployment(testNamespace, "operator-deployment") + createdDeployment, err := c.GetDeployment(opGroupNamespace, deploymentName) if err != nil { if errors.IsNotFound(err) { return false, nil @@ -274,9 +362,56 @@ func TestOperatorGroup(t *testing.T) { return false, nil }) + // check rbac in target namespace + informerFactory := 
+	roleInformer := informerFactory.Rbac().V1().Roles()
+	roleBindingInformer := informerFactory.Rbac().V1().RoleBindings()
+	clusterRoleInformer := informerFactory.Rbac().V1().ClusterRoles()
+	clusterRoleBindingInformer := informerFactory.Rbac().V1().ClusterRoleBindings()
+
+	// kick off informers
+	stopCh := make(chan struct{})
+	defer func() {
+		stopCh <- struct{}{}
+		return
+	}()
+
+	for _, informer := range []cache.SharedIndexInformer{roleInformer.Informer(), roleBindingInformer.Informer(), clusterRoleInformer.Informer(), clusterRoleBindingInformer.Informer()} {
+		go informer.Run(stopCh)
+
+		synced := func() (bool, error) {
+			return informer.HasSynced(), nil
+		}
+
+		// wait until the informer has synced to continue
+		err := wait.PollUntil(500*time.Millisecond, synced, stopCh)
+		require.NoError(t, err)
+	}
+
+	ruleChecker := install.NewCSVRuleChecker(roleInformer.Lister(), roleBindingInformer.Lister(), clusterRoleInformer.Lister(), clusterRoleBindingInformer.Lister(), &aCSV)
+
+	t.Log("Waiting for operator to have rbac in target namespace")
+	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
+		for _, perm := range permissions {
+			sa, err := c.GetServiceAccount(opGroupNamespace, perm.ServiceAccountName)
+			require.NoError(t, err)
+			for _, rule := range perm.Rules {
+				satisfied, err := ruleChecker.RuleSatisfied(sa, otherNamespaceName, rule)
+				if err != nil {
+					t.Log(err.Error())
+					return false, nil
+				}
+				if !satisfied {
+					return false, nil
+				}
+			}
+		}
+		return true, nil
+	})
+
 	// ensure deletion cleans up copied CSV
 	t.Log("Deleting CSV")
-	err = crc.OperatorsV1alpha1().ClusterServiceVersions(testNamespace).Delete(csvName, &metav1.DeleteOptions{})
+	err = crc.OperatorsV1alpha1().ClusterServiceVersions(opGroupNamespace).Delete(csvName, &metav1.DeleteOptions{})
 	require.NoError(t, err)
 
 	t.Log("Waiting for orphaned CSV to be deleted")
@@ -286,8 +421,4 @@ func TestOperatorGroup(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	err = c.KubernetesInterface().CoreV1().Namespaces().Delete(otherNamespaceName, &metav1.DeleteOptions{})
-	require.NoError(t, err)
-	err = crc.OperatorsV1alpha2().OperatorGroups(testNamespace).Delete(operatorGroup.Name, &metav1.DeleteOptions{})
-	require.NoError(t, err)
 }
diff --git a/test/e2e/packagemanifest_e2e_test.go b/test/e2e/packagemanifest_e2e_test.go
index 8583dc31100..6339fe6e043 100644
--- a/test/e2e/packagemanifest_e2e_test.go
+++ b/test/e2e/packagemanifest_e2e_test.go
@@ -84,8 +84,9 @@ func TestPackageManifestLoading(t *testing.T) {
 	}
 
 	watcher, err := pmc.PackagemanifestV1alpha1().PackageManifests(testNamespace).Watch(metav1.ListOptions{})
-	defer watcher.Stop()
 	require.NoError(t, err)
+	defer watcher.Stop()
+
 	receivedPackage := make(chan bool)
 	go func() {
 		event := <-watcher.ResultChan()
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index da32fe12f33..76e203b9d77 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -63,7 +63,11 @@ func HandleCrash(additionalHandlers ...func(interface{})) {
 // logPanic logs the caller tree when a panic occurs.
 func logPanic(r interface{}) {
 	callers := getCallers(r)
-	glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
+	if _, ok := r.(string); ok {
+		glog.Errorf("Observed a panic: %s\n%v", r, callers)
+	} else {
+		glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
+	}
 }
 
 func getCallers(r interface{}) string {
diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go
index 9b4c79f8951..b348f490ae6 100644
--- a/vendor/k8s.io/client-go/pkg/version/base.go
+++ b/vendor/k8s.io/client-go/pkg/version/base.go
@@ -55,8 +55,8 @@ var (
 	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
 	// companion .gitattributes file containing 'export-subst' in this same
 	// directory. See also https://git-scm.com/docs/gitattributes
-	gitVersion string = "v0.0.0-master+$Format:%h$"
-	gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+	gitVersion string = "v0.0.0-master+7d04d0e2"
+	gitCommit string = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" // sha1 from git, output of $(git rev-parse HEAD)
 	gitTreeState string = "" // state of git tree, either "clean" or "dirty"
 
 	buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0e9de74e770..f3568884380 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -234,7 +234,7 @@ gopkg.in/inf.v0
 gopkg.in/natefinch/lumberjack.v2
 # gopkg.in/yaml.v2 v2.2.1
 gopkg.in/yaml.v2
-# k8s.io/api v0.0.0-20180904230853-4e7be11eab3f
+# k8s.io/api v0.0.0-20181203235848-2dd39edadc55
 k8s.io/api/core/v1
 k8s.io/api/apps/v1
 k8s.io/api/rbac/v1
@@ -265,7 +265,7 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/admission/v1beta1
-# k8s.io/apiextensions-apiserver v0.0.0-20180905004947-16750353bf97
+# k8s.io/apiextensions-apiserver v0.0.0-20181204003618-e419c5771cdc
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
 k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions
@@ -281,7 +281,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension
 k8s.io/apiextensions-apiserver/pkg/features
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake
 k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1
-# k8s.io/apimachinery v0.0.0-20181126123124-70adfbae261e
+# k8s.io/apimachinery v0.0.0-20181203235515-3d8ee2261517
 k8s.io/apimachinery/pkg/apis/meta/v1
 k8s.io/apimachinery/pkg/runtime
 k8s.io/apimachinery/pkg/runtime/schema
@@ -578,7 +578,7 @@ k8s.io/client-go/listers/settings/v1alpha1
 k8s.io/client-go/listers/storage/v1
 k8s.io/client-go/listers/storage/v1alpha1
 k8s.io/client-go/listers/storage/v1beta1
-# k8s.io/code-generator v0.0.0-20180904193909-8c97d6ab64da
+# k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3
 k8s.io/code-generator/cmd/client-gen
 k8s.io/code-generator/cmd/conversion-gen
 k8s.io/code-generator/cmd/deepcopy-gen
@@ -621,7 +621,7 @@ k8s.io/gengo/parser
 k8s.io/gengo/examples/set-gen/sets
 # k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92
 k8s.io/klog
-# k8s.io/kube-aggregator v0.0.0-20180905000155-efa32eb095fe
+# k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
 k8s.io/kube-aggregator/pkg/client/informers/externalversions
 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset
@@ -650,7 +650,7 @@ k8s.io/kube-openapi/pkg/generators/rules
 k8s.io/kube-openapi/pkg/builder
 k8s.io/kube-openapi/pkg/handler
 k8s.io/kube-openapi/pkg/util/sets
-# k8s.io/kubernetes v1.11.6-beta.0.0.20181126160157-5933b9771b71
+# k8s.io/kubernetes v1.11.6-beta.0.0.20181207014600-4600add36de5
 k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac
 k8s.io/kubernetes/pkg/apis/rbac/v1
 k8s.io/kubernetes/pkg/registry/rbac/validation