From 18b182fd50bc6bb3e48a1d1d547698e7ce4fa4b1 Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Wed, 24 May 2023 15:44:24 -0400 Subject: [PATCH] (feat): (testing): Add extensible unpacking interface + controller tests (#65) Signed-off-by: Bryce Palmer Signed-off-by: Bryce Palmer --- cmd/manager/main.go | 18 +- ...atalogd.operatorframework.io_catalogs.yaml | 65 +++- config/crd/kustomization.yaml | 8 + config/crd/patches/catalog_validation.yaml | 6 + config/rbac/role.yaml | 13 +- config/samples/core_v1beta1_catalog.yaml | 14 +- go.mod | 3 + go.sum | 5 + internal/source/image.go | 272 +++++++++++++ internal/source/unpacker.go | 115 ++++++ pkg/apis/core/v1beta1/catalog_types.go | 50 ++- .../core/v1beta1/zz_generated.deepcopy.go | 44 ++- pkg/controllers/core/catalog_controller.go | 288 ++++---------- .../core/catalog_controller_test.go | 358 ++++++++++++++++++ pkg/controllers/core/suite_test.go | 78 ++++ 15 files changed, 1071 insertions(+), 266 deletions(-) create mode 100644 config/crd/patches/catalog_validation.yaml create mode 100644 internal/source/image.go create mode 100644 internal/source/unpacker.go create mode 100644 pkg/controllers/core/catalog_controller_test.go create mode 100644 pkg/controllers/core/suite_test.go diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 3ded2f6e..23ac9b43 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "github.com/operator-framework/catalogd/internal/source" "github.com/operator-framework/catalogd/internal/version" "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" corecontrollers "github.com/operator-framework/catalogd/pkg/controllers/core" @@ -56,16 +57,19 @@ func main() { metricsAddr string enableLeaderElection bool probeAddr string - opmImage string + unpackImage string profiling bool catalogdVersion bool + sysNs string ) flag.StringVar(&metricsAddr, "metrics-bind-address", 
":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") - flag.StringVar(&opmImage, "opm-image", "quay.io/operator-framework/opm:v1.26", "The opm image to use when unpacking catalog images") + // TODO: should we move the unpacker to some common place? Or... hear me out... should catalogd just be a rukpak provisioner? + flag.StringVar(&unpackImage, "unpack-image", "quay.io/operator-framework/rukpak:v0.12.0", "The unpack image to use when unpacking catalog images") + flag.StringVar(&sysNs, "system-ns", "catalogd-system", "The namespace catalogd uses for internal state, configuration, and workloads") opts := zap.Options{ Development: true, } @@ -94,11 +98,15 @@ func main() { os.Exit(1) } + unpacker, err := source.NewDefaultUnpacker(mgr, sysNs, unpackImage) + if err != nil { + setupLog.Error(err, "unable to create unpacker") + os.Exit(1) + } + if err = (&corecontrollers.CatalogReconciler{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Cfg: mgr.GetConfig(), - OpmImage: opmImage, + Unpacker: unpacker, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Catalog") os.Exit(1) diff --git a/config/crd/bases/catalogd.operatorframework.io_catalogs.yaml b/config/crd/bases/catalogd.operatorframework.io_catalogs.yaml index e491c793..48fe4c1c 100644 --- a/config/crd/bases/catalogd.operatorframework.io_catalogs.yaml +++ b/config/crd/bases/catalogd.operatorframework.io_catalogs.yaml @@ -34,19 +34,33 @@ spec: spec: description: CatalogSpec defines the desired state of Catalog properties: - image: - description: Image is the Catalog image that contains Operators' metadata - in the FBC format 
https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs - type: string - pollingInterval: - description: PollingInterval is used to determine the time interval - between checks of the latest index image version. The image is polled - to see if a new version of the image is available. If available, - the latest image is pulled and the cache is updated to contain the - new content. - type: string + source: + description: Source is the source of a Catalog that contains Operators' + metadata in the FBC format https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs + properties: + image: + description: Image is the catalog image that backs the content + of this catalog. + properties: + pullSecret: + description: PullSecret contains the name of the image pull + secret in the namespace that catalogd is deployed. + type: string + ref: + description: Ref contains the reference to a container image + containing Catalog contents. + type: string + required: + - ref + type: object + type: + description: Type defines the kind of Catalog content being sourced. + type: string + required: + - type + type: object required: - - image + - source type: object status: description: CatalogStatus defines the observed state of Catalog @@ -121,6 +135,33 @@ spec: - type type: object type: array + phase: + type: string + resolvedSource: + description: CatalogSource contains the sourcing information for a + Catalog + properties: + image: + description: Image is the catalog image that backs the content + of this catalog. + properties: + pullSecret: + description: PullSecret contains the name of the image pull + secret in the namespace that catalogd is deployed. + type: string + ref: + description: Ref contains the reference to a container image + containing Catalog contents. + type: string + required: + - ref + type: object + type: + description: Type defines the kind of Catalog content being sourced. 
+ type: string + required: + - type + type: object type: object type: object served: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 78113776..06ea99ae 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,3 +6,11 @@ resources: - bases/catalogd.operatorframework.io_packages.yaml - bases/catalogd.operatorframework.io_catalogs.yaml #+kubebuilder:scaffold:crdkustomizeresource + +patches: +- path: patches/catalog_validation.yaml + target: + group: apiextensions.k8s.io + version: v1 + kind: CustomResourceDefinition + name: catalogs.catalogd.operatorframework.io \ No newline at end of file diff --git a/config/crd/patches/catalog_validation.yaml b/config/crd/patches/catalog_validation.yaml new file mode 100644 index 00000000..0ba9cda4 --- /dev/null +++ b/config/crd/patches/catalog_validation.yaml @@ -0,0 +1,6 @@ +# Union source type +- op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/source/oneOf + value: + - required: + - image \ No newline at end of file diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 3e8083b7..1d0060f2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,15 +4,6 @@ kind: ClusterRole metadata: name: manager-role rules: -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - get - - list - - watch - apiGroups: - catalogd.operatorframework.io resources: @@ -96,8 +87,12 @@ rules: resources: - pods verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - "" diff --git a/config/samples/core_v1beta1_catalog.yaml b/config/samples/core_v1beta1_catalog.yaml index 98a73e0a..400c14be 100644 --- a/config/samples/core_v1beta1_catalog.yaml +++ b/config/samples/core_v1beta1_catalog.yaml @@ -1,13 +1,9 @@ apiVersion: catalogd.operatorframework.io/v1beta1 kind: Catalog metadata: - labels: - app.kubernetes.io/name: catalog - app.kubernetes.io/instance: catalog-sample - app.kubernetes.io/part-of: 
catalogd - app.kuberentes.io/managed-by: kustomize - app.kubernetes.io/created-by: catalogd - name: catalog-sample + name: operatorhubio spec: - image: quay.io/operatorhubio/catalog:latest - pollingInterval: 45m + source: + type: image + image: + ref: quay.io/operatorhubio/catalog:latest \ No newline at end of file diff --git a/go.mod b/go.mod index 83a1a807..7da2f647 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,9 @@ module github.com/operator-framework/catalogd go 1.19 require ( + github.com/nlepage/go-tarfs v1.1.0 + github.com/onsi/ginkgo/v2 v2.6.0 + github.com/onsi/gomega v1.24.1 github.com/operator-framework/operator-registry v1.26.3 k8s.io/api v0.26.0 k8s.io/apimachinery v0.26.0 diff --git a/go.sum b/go.sum index 1618dd9a..d5188f80 100644 --- a/go.sum +++ b/go.sum @@ -240,8 +240,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nlepage/go-tarfs v1.1.0 h1:bsACOiZMB/zFjYG/sE01070i9Fl26MnRpw0L6WuyfVs= +github.com/nlepage/go-tarfs v1.1.0/go.mod h1:IhxRcLhLkawBetnwu/JNuoPkq/6cclAllhgEa6SmzS8= github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= +github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= github.com/operator-framework/api v0.17.2-0.20220915200120-ff2dbc53d381 h1:/XHgTzfI0O/RP3I6WF0BiPLVuVkfgVyiw04b0MyCJ2M= github.com/operator-framework/api v0.17.2-0.20220915200120-ff2dbc53d381/go.mod h1:wof6IrBhVAufc+ZiQo/BB68fKctXiuSEAMbOO29kZdI= 
github.com/operator-framework/operator-registry v1.26.3 h1:U+HTGgjAT5RCXU2WkDwa525wcqdo97BsO7WfMhwL5MA= @@ -295,6 +299,7 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/internal/source/image.go b/internal/source/image.go new file mode 100644 index 00000000..7ef2e027 --- /dev/null +++ b/internal/source/image.go @@ -0,0 +1,272 @@ +package source + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "strings" + + "github.com/nlepage/go-tarfs" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + applyconfigurationcorev1 "k8s.io/client-go/applyconfigurations/core/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +type Image struct { + Client client.Client + KubeClient kubernetes.Interface + PodNamespace string + UnpackImage string +} + +const imageCatalogUnpackContainerName = "catalog" + +func (i *Image) Unpack(ctx context.Context, catalog *catalogdv1beta1.Catalog) (*Result, error) { + if catalog.Spec.Source.Type != 
catalogdv1beta1.SourceTypeImage { + panic(fmt.Sprintf("source type %q is unable to handle specified catalog source type %q", catalogdv1beta1.SourceTypeImage, catalog.Spec.Source.Type)) + } + if catalog.Spec.Source.Image == nil { + return nil, fmt.Errorf("catalog source image configuration is unset") + } + + pod := &corev1.Pod{} + op, err := i.ensureUnpackPod(ctx, catalog, pod) + if err != nil { + return nil, err + } else if op == controllerutil.OperationResultCreated || op == controllerutil.OperationResultUpdated || pod.DeletionTimestamp != nil { + return &Result{State: StatePending}, nil + } + + switch phase := pod.Status.Phase; phase { + case corev1.PodPending: + return pendingImagePodResult(pod), nil + case corev1.PodRunning: + return &Result{State: StateUnpacking}, nil + case corev1.PodFailed: + return nil, i.failedPodResult(ctx, pod) + case corev1.PodSucceeded: + return i.succeededPodResult(ctx, pod) + default: + return nil, i.handleUnexpectedPod(ctx, pod) + } +} + +func (i *Image) ensureUnpackPod(ctx context.Context, catalog *catalogdv1beta1.Catalog, pod *corev1.Pod) (controllerutil.OperationResult, error) { + existingPod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: i.PodNamespace, Name: catalog.Name}} + if err := i.Client.Get(ctx, client.ObjectKeyFromObject(existingPod), existingPod); client.IgnoreNotFound(err) != nil { + return controllerutil.OperationResultNone, err + } + + podApplyConfig := i.getDesiredPodApplyConfig(catalog) + updatedPod, err := i.KubeClient.CoreV1().Pods(i.PodNamespace).Apply(ctx, podApplyConfig, metav1.ApplyOptions{Force: true, FieldManager: "catalogd-core"}) + if err != nil { + if !apierrors.IsInvalid(err) { + return controllerutil.OperationResultNone, err + } + if err := i.Client.Delete(ctx, existingPod); err != nil { + return controllerutil.OperationResultNone, err + } + updatedPod, err = i.KubeClient.CoreV1().Pods(i.PodNamespace).Apply(ctx, podApplyConfig, metav1.ApplyOptions{Force: true, FieldManager: "catalogd-core"}) 
+ if err != nil { + return controllerutil.OperationResultNone, err + } + } + + // make sure the passed in pod value is updated with the latest + // version of the pod + *pod = *updatedPod + + // compare existingPod to newPod and return an appropriate + // OperatorResult value. + newPod := updatedPod.DeepCopy() + unsetNonComparedPodFields(existingPod, newPod) + if equality.Semantic.DeepEqual(existingPod, newPod) { + return controllerutil.OperationResultNone, nil + } + return controllerutil.OperationResultUpdated, nil +} + +func (i *Image) getDesiredPodApplyConfig(catalog *catalogdv1beta1.Catalog) *applyconfigurationcorev1.PodApplyConfiguration { + // TODO: Address unpacker pod allowing root users for image sources + // + // In our current implementation, we are creating a pod that uses the image + // provided by an image source. This pod is not always guaranteed to run as a + // non-root user and thus will fail to initialize if running as root in a PSA + // restricted namespace due to violations. As it currently stands, our compliance + // with PSA is baseline which allows for pods to run as root users. However, + // all RukPak processes and resources, except this unpacker pod for image sources, + // are runnable in a PSA restricted environment. We should consider ways to make + // this PSA definition either configurable or workable in a restricted namespace. + // + // See https://github.com/operator-framework/rukpak/pull/539 for more detail. + containerSecurityContext := applyconfigurationcorev1.SecurityContext(). + WithAllowPrivilegeEscalation(false). + WithCapabilities(applyconfigurationcorev1.Capabilities(). + WithDrop("ALL"), + ) + + podApply := applyconfigurationcorev1.Pod(catalog.Name, i.PodNamespace). + WithLabels(map[string]string{ + "catalogd.operatorframework.io/owner-kind": catalog.Kind, + "catalogd.operatorframework.io/owner-name": catalog.Name, + }). + WithOwnerReferences(v1.OwnerReference(). + WithName(catalog.Name). + WithKind(catalog.Kind). 
+ WithAPIVersion(catalog.APIVersion). + WithUID(catalog.UID). + WithController(true). + WithBlockOwnerDeletion(true), + ). + WithSpec(applyconfigurationcorev1.PodSpec(). + WithAutomountServiceAccountToken(false). + WithRestartPolicy(corev1.RestartPolicyNever). + WithInitContainers(applyconfigurationcorev1.Container(). + WithName("install-unpack"). + WithImage(i.UnpackImage). + WithImagePullPolicy(corev1.PullIfNotPresent). + WithCommand("cp", "-Rv", "/unpack", "/util/bin/unpack"). + WithVolumeMounts(applyconfigurationcorev1.VolumeMount(). + WithName("util"). + WithMountPath("/util/bin"), + ). + WithSecurityContext(containerSecurityContext), + ). + WithContainers(applyconfigurationcorev1.Container(). + WithName(imageCatalogUnpackContainerName). + WithImage(catalog.Spec.Source.Image.Ref). + WithCommand("/util/bin/unpack", "--bundle-dir", "/configs"). + WithVolumeMounts(applyconfigurationcorev1.VolumeMount(). + WithName("util"). + WithMountPath("/util/bin"), + ). + WithSecurityContext(containerSecurityContext), + ). + WithVolumes(applyconfigurationcorev1.Volume(). + WithName("util"). + WithEmptyDir(applyconfigurationcorev1.EmptyDirVolumeSource()), + ). + WithSecurityContext(applyconfigurationcorev1.PodSecurityContext(). + WithRunAsNonRoot(false). + WithSeccompProfile(applyconfigurationcorev1.SeccompProfile(). 
+ WithType(corev1.SeccompProfileTypeRuntimeDefault), + ), + ), + ) + + if catalog.Spec.Source.Image.PullSecret != "" { + podApply.Spec = podApply.Spec.WithImagePullSecrets( + applyconfigurationcorev1.LocalObjectReference().WithName(catalog.Spec.Source.Image.PullSecret), + ) + } + return podApply +} + +func unsetNonComparedPodFields(pods ...*corev1.Pod) { + for _, p := range pods { + p.APIVersion = "" + p.Kind = "" + p.Status = corev1.PodStatus{} + } +} + +func (i *Image) failedPodResult(ctx context.Context, pod *corev1.Pod) error { + logs, err := i.getPodLogs(ctx, pod) + if err != nil { + return fmt.Errorf("unpack failed: failed to retrieve failed pod logs: %v", err) + } + _ = i.Client.Delete(ctx, pod) + return fmt.Errorf("unpack failed: %v", string(logs)) +} + +func (i *Image) succeededPodResult(ctx context.Context, pod *corev1.Pod) (*Result, error) { + catalogFS, err := i.getCatalogContents(ctx, pod) + if err != nil { + return nil, fmt.Errorf("get catalog contents: %v", err) + } + + digest, err := i.getCatalogImageDigest(pod) + if err != nil { + return nil, fmt.Errorf("get catalog image digest: %v", err) + } + + resolvedSource := &catalogdv1beta1.CatalogSource{ + Type: catalogdv1beta1.SourceTypeImage, + Image: &catalogdv1beta1.ImageSource{Ref: digest}, + } + + message := fmt.Sprintf("successfully unpacked the catalog image %q", digest) + + return &Result{FS: catalogFS, ResolvedSource: resolvedSource, State: StateUnpacked, Message: message}, nil +} + +func (i *Image) getCatalogContents(ctx context.Context, pod *corev1.Pod) (fs.FS, error) { + catalogData, err := i.getPodLogs(ctx, pod) + if err != nil { + return nil, fmt.Errorf("get catalog contents: %v", err) + } + bd := struct { + Content []byte `json:"content"` + }{} + + if err := json.Unmarshal(catalogData, &bd); err != nil { + return nil, fmt.Errorf("parse catalog data: %v", err) + } + + gzr, err := gzip.NewReader(bytes.NewReader(bd.Content)) + if err != nil { + return nil, fmt.Errorf("read catalog content 
gzip: %v", err) + } + return tarfs.New(gzr) +} + +func (i *Image) getCatalogImageDigest(pod *corev1.Pod) (string, error) { + for _, ps := range pod.Status.ContainerStatuses { + if ps.Name == imageCatalogUnpackContainerName && ps.ImageID != "" { + return ps.ImageID, nil + } + } + return "", fmt.Errorf("catalog image digest not found") +} + +func (i *Image) getPodLogs(ctx context.Context, pod *corev1.Pod) ([]byte, error) { + logReader, err := i.KubeClient.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(ctx) + if err != nil { + return nil, fmt.Errorf("get pod logs: %v", err) + } + defer logReader.Close() + buf := &bytes.Buffer{} + if _, err := io.Copy(buf, logReader); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (i *Image) handleUnexpectedPod(ctx context.Context, pod *corev1.Pod) error { + _ = i.Client.Delete(ctx, pod) + return fmt.Errorf("unexpected pod phase: %v", pod.Status.Phase) +} + +func pendingImagePodResult(pod *corev1.Pod) *Result { + var messages []string + for _, cStatus := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) { + if waiting := cStatus.State.Waiting; waiting != nil { + if waiting.Reason == "ErrImagePull" || waiting.Reason == "ImagePullBackOff" { + messages = append(messages, waiting.Message) + } + } + } + return &Result{State: StatePending, Message: strings.Join(messages, "; ")} +} diff --git a/internal/source/unpacker.go b/internal/source/unpacker.go new file mode 100644 index 00000000..8c7c329e --- /dev/null +++ b/internal/source/unpacker.go @@ -0,0 +1,115 @@ +package source + +import ( + "context" + "fmt" + "io/fs" + + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/cluster" + + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" +) + +// TODO: This package is almost entirely copy/pasted from rukpak. We should look +// into whether it is possible to share this code. 
+// +// TODO: None of the rukpak CRD validations (both static and from the rukpak +// webhooks) related to the source are present here. Which of them do we need? + +// Unpacker unpacks catalog content, either synchronously or asynchronously and +// returns a Result, which conveys information about the progress of unpacking +// the catalog content. +// +// If a Source unpacks content asynchronously, it should register one or more +// watches with a controller to ensure that Bundles referencing this source +// can be reconciled as progress updates are available. +// +// For asynchronous Sources, multiple calls to Unpack should be made until the +// returned result includes state StateUnpacked. +// +// NOTE: A source is meant to be agnostic to specific catalog formats and +// specifications. A source should treat a catalog root directory as an opaque +// file tree and delegate catalog format concerns to catalog parsers. +type Unpacker interface { + Unpack(context.Context, *catalogdv1beta1.Catalog) (*Result, error) +} + +// Result conveys progress information about unpacking catalog content. +type Result struct { + // Bundle contains the full filesystem of a catalog's root directory. + FS fs.FS + + // ResolvedSource is a reproducible view of a Bundle's Source. + // When possible, source implementations should return a ResolvedSource + // that pins the Source such that future fetches of the catalog content can + // be guaranteed to fetch the exact same catalog content as the original + // unpack. + // + // For example, resolved image sources should reference a container image + // digest rather than an image tag, and git sources should reference a + // commit hash rather than a branch or tag. + ResolvedSource *catalogdv1beta1.CatalogSource + + // State is the current state of unpacking the catalog content. + State State + + // Message is contextual information about the progress of unpacking the + // catalog content. 
+ Message string +} + +type State string + +const ( + // StatePending conveys that a request for unpacking a catalog has been + // acknowledged, but not yet started. + StatePending State = "Pending" + + // StateUnpacking conveys that the source is currently unpacking a catalog. + // This state should be used when the catalog contents are being downloaded + // and processed. + StateUnpacking State = "Unpacking" + + // StateUnpacked conveys that the catalog has been successfully unpacked. + StateUnpacked State = "Unpacked" +) + +type unpacker struct { + sources map[catalogdv1beta1.SourceType]Unpacker +} + +// NewUnpacker returns a new composite Source that unpacks catalogs using the source +// mapping provided by the configured sources. +func NewUnpacker(sources map[catalogdv1beta1.SourceType]Unpacker) Unpacker { + return &unpacker{sources: sources} +} + +func (s *unpacker) Unpack(ctx context.Context, catalog *catalogdv1beta1.Catalog) (*Result, error) { + source, ok := s.sources[catalog.Spec.Source.Type] + if !ok { + return nil, fmt.Errorf("source type %q not supported", catalog.Spec.Source.Type) + } + return source.Unpack(ctx, catalog) +} + +// NewDefaultUnpacker returns a new composite Source that unpacks catalogs using +// a default source mapping with built-in implementations of all of the supported +// source types. 
+// +// TODO: refactor NewDefaultUnpacker due to growing parameter list +func NewDefaultUnpacker(systemNsCluster cluster.Cluster, namespace, unpackImage string) (Unpacker, error) { + cfg := systemNsCluster.GetConfig() + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + return NewUnpacker(map[catalogdv1beta1.SourceType]Unpacker{ + catalogdv1beta1.SourceTypeImage: &Image{ + Client: systemNsCluster.GetClient(), + KubeClient: kubeClient, + PodNamespace: namespace, + UnpackImage: unpackImage, + }, + }), nil +} diff --git a/pkg/apis/core/v1beta1/catalog_types.go b/pkg/apis/core/v1beta1/catalog_types.go index 437d3857..4a710650 100644 --- a/pkg/apis/core/v1beta1/catalog_types.go +++ b/pkg/apis/core/v1beta1/catalog_types.go @@ -20,11 +20,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// TODO: The source types, reason, etc. are all copy/pasted from the rukpak +// repository. We should look into whether it is possible to share these. + +type SourceType string + const ( - TypeReady = "Ready" + SourceTypeImage SourceType = "image" + + TypeUnpacked = "Unpacked" - ReasonContentsAvailable = "ContentsAvailable" - ReasonUnpackError = "UnpackError" + ReasonUnpackPending = "UnpackPending" + ReasonUnpacking = "Unpacking" + ReasonUnpackSuccessful = "UnpackSuccessful" + ReasonUnpackFailed = "UnpackFailed" + + PhasePending = "Pending" + PhaseUnpacking = "Unpacking" + PhaseFailing = "Failing" + PhaseUnpacked = "Unpacked" ) //+kubebuilder:object:root=true @@ -52,22 +66,34 @@ type CatalogList struct { // CatalogSpec defines the desired state of Catalog type CatalogSpec struct { - - // Image is the Catalog image that contains Operators' metadata in the FBC format + // Source is the source of a Catalog that contains Operators' metadata in the FBC format // https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs - Image string `json:"image"` - - // PollingInterval is used to determine the time interval between checks of 
the - // latest index image version. The image is polled to see if a new version of the - // image is available. If available, the latest image is pulled and the cache is - // updated to contain the new content. - PollingInterval *metav1.Duration `json:"pollingInterval,omitempty"` + Source CatalogSource `json:"source"` } // CatalogStatus defines the observed state of Catalog type CatalogStatus struct { // Conditions store the status conditions of the Catalog instances Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + ResolvedSource *CatalogSource `json:"resolvedSource,omitempty"` + Phase string `json:"phase,omitempty"` +} + +// CatalogSource contains the sourcing information for a Catalog +type CatalogSource struct { + // Type defines the kind of Catalog content being sourced. + Type SourceType `json:"type"` + // Image is the catalog image that backs the content of this catalog. + Image *ImageSource `json:"image,omitempty"` +} + +// ImageSource contains information required for sourcing a Catalog from an OCI image +type ImageSource struct { + // Ref contains the reference to a container image containing Catalog contents. + Ref string `json:"ref"` + // PullSecret contains the name of the image pull secret in the namespace that catalogd is deployed. + PullSecret string `json:"pullSecret,omitempty"` } func init() { diff --git a/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index 60698fb0..fc788d63 100644 --- a/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -187,15 +187,31 @@ func (in *CatalogList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { +func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { *out = *in - if in.PollingInterval != nil { - in, out := &in.PollingInterval, &out.PollingInterval - *out = new(v1.Duration) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageSource) **out = **in } } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource. +func (in *CatalogSource) DeepCopy() *CatalogSource { + if in == nil { + return nil + } + out := new(CatalogSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSpec. func (in *CatalogSpec) DeepCopy() *CatalogSpec { if in == nil { @@ -216,6 +232,11 @@ func (in *CatalogStatus) DeepCopyInto(out *CatalogStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ResolvedSource != nil { + in, out := &in.ResolvedSource, &out.ResolvedSource + *out = new(CatalogSource) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogStatus. @@ -263,6 +284,21 @@ func (in *Icon) DeepCopy() *Icon { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSource) DeepCopyInto(out *ImageSource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource. +func (in *ImageSource) DeepCopy() *ImageSource { + if in == nil { + return nil + } + out := new(ImageSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Package) DeepCopyInto(out *Package) { *out = *in diff --git a/pkg/controllers/core/catalog_controller.go b/pkg/controllers/core/catalog_controller.go index 13715c57..a77a4319 100644 --- a/pkg/controllers/core/catalog_controller.go +++ b/pkg/controllers/core/catalog_controller.go @@ -17,24 +17,16 @@ limitations under the License. package core import ( - "bytes" "context" "fmt" - "io" - "path/filepath" - "time" "github.com/operator-framework/operator-registry/alpha/declcfg" - batchv1 "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apimacherrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,15 +34,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + "github.com/operator-framework/catalogd/internal/source" corev1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" ) +// TODO (everettraven): Add unit tests for the CatalogReconciler + // CatalogReconciler reconciles a Catalog object type CatalogReconciler struct { client.Client - Scheme *runtime.Scheme - Cfg *rest.Config - OpmImage string + Unpacker source.Unpacker } //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogs,verbs=get;list;watch;create;update;patch;delete @@ -62,9 +55,8 @@ type CatalogReconciler struct { //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=packages,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=packages/status,verbs=get;update;patch 
//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=packages/finalizers,verbs=update -//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=pods,verbs=create;update;patch;delete;get;list;watch //+kubebuilder:rbac:groups=core,resources=pods/log,verbs=get;list;watch -//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=create;get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -114,113 +106,92 @@ func (r *CatalogReconciler) SetupWithManager(mgr ctrl.Manager) error { // for https://github.com/operator-framework/catalogd/issues/6. The fix for // #6 should also remove the usage of `builder.WithPredicates(predicate.GenerationChangedPredicate{})` For(&corev1beta1.Catalog{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Owns(&corev1.Pod{}). Complete(r) } func (r *CatalogReconciler) reconcile(ctx context.Context, catalog *corev1beta1.Catalog) (ctrl.Result, error) { - job, err := r.ensureUnpackJob(ctx, catalog) + unpackResult, err := r.Unpacker.Unpack(ctx, catalog) if err != nil { - updateStatusError(catalog, err) - return ctrl.Result{}, fmt.Errorf("ensuring unpack job: %v", err) + return ctrl.Result{}, updateStatusUnpackFailing(&catalog.Status, fmt.Errorf("source bundle content: %v", err)) } - complete, err := r.checkUnpackJobComplete(ctx, job) - if err != nil { - updateStatusError(catalog, err) - return ctrl.Result{}, fmt.Errorf("ensuring unpack job completed: %v", err) - } - if !complete { - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil - } + switch unpackResult.State { + case source.StatePending: + updateStatusUnpackPending(&catalog.Status, unpackResult) + return ctrl.Result{}, nil + case source.StateUnpacking: + updateStatusUnpacking(&catalog.Status, unpackResult) + return ctrl.Result{}, nil + case source.StateUnpacked: + // TODO: We should check to see if 
the unpacked result has the same content
+		// as the already unpacked content. If it does, we should skip the rest
+		// of the unpacking steps.
+
+		fbc, err := declcfg.LoadFS(unpackResult.FS)
+		if err != nil {
+			return ctrl.Result{}, updateStatusUnpackFailing(&catalog.Status, fmt.Errorf("load FBC from filesystem: %v", err))
+		}
 
-	declCfg, err := r.parseUnpackLogs(ctx, job)
-	if err != nil {
-		updateStatusError(catalog, err)
-		return ctrl.Result{}, err
-	}
+		if err := r.createPackages(ctx, fbc, catalog); err != nil {
+			return ctrl.Result{}, updateStatusUnpackFailing(&catalog.Status, fmt.Errorf("create package objects: %v", err))
+		}
 
-	if err := r.createPackages(ctx, declCfg, catalog); err != nil {
-		updateStatusError(catalog, err)
-		return ctrl.Result{}, err
-	}
+		if err := r.createBundleMetadata(ctx, fbc, catalog); err != nil {
+			return ctrl.Result{}, updateStatusUnpackFailing(&catalog.Status, fmt.Errorf("create bundle metadata objects: %v", err))
+		}
 
-	if err := r.createBundleMetadata(ctx, declCfg, catalog); err != nil {
-		updateStatusError(catalog, err)
-		return ctrl.Result{}, err
+		updateStatusUnpacked(&catalog.Status, unpackResult)
+		return ctrl.Result{}, nil
+	default:
+		return ctrl.Result{}, updateStatusUnpackFailing(&catalog.Status, fmt.Errorf("unknown unpack state %q", unpackResult.State))
 	}
-	// update Catalog status as "Ready" since at this point
-	// all catalog content should be available on cluster
-	updateStatusReady(catalog)
-	return ctrl.Result{}, nil
 }
 
-// ensureUnpackJob will ensure that an unpack job has been created for the given
-// Catalog.
It will return the unpack job if successful (either the Job already -// exists or one was successfully created) or an error if it is unsuccessful -func (r *CatalogReconciler) ensureUnpackJob(ctx context.Context, catalog *corev1beta1.Catalog) (*batchv1.Job, error) { - // Create the unpack Job manifest for the given Catalog - job := r.unpackJob(catalog) - - // If the Job already exists just return it. If it doesn't then attempt to create it - err := r.Client.Get(ctx, client.ObjectKeyFromObject(job), job) - if err != nil { - if errors.IsNotFound(err) { - if err = r.createUnpackJob(ctx, catalog); err != nil { - return nil, err - } - return job, nil - } - return nil, err - } - - return job, nil +func updateStatusUnpackPending(status *corev1beta1.CatalogStatus, result *source.Result) { + status.ResolvedSource = nil + status.Phase = corev1beta1.PhasePending + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, + Status: metav1.ConditionFalse, + Reason: corev1beta1.ReasonUnpackPending, + Message: result.Message, + }) } -// checkUnpackJobComplete will check whether or not an unpack Job has completed. 
-// It will return a boolean that is true if the Job has successfully completed, -// false if the Job has not completed, or an error if the Job is completed but in a -// "Failed", "FailureTarget", or "Suspended" state or an error is encountered -// when attempting to check the status of the Job -func (r *CatalogReconciler) checkUnpackJobComplete(ctx context.Context, job *batchv1.Job) (bool, error) { - // If the completion time is non-nil that means the Job has completed - if job.Status.CompletionTime != nil { - // Loop through the conditions and check for any fail conditions - for _, cond := range job.Status.Conditions { - if cond.Status == v1.ConditionTrue && cond.Type != batchv1.JobComplete { - return false, fmt.Errorf("unpack job has condition %q with a status of %q", cond.Type, cond.Status) - } - } - // No failures and job has a completion time so job successfully completed - return true, nil - } - return false, nil +func updateStatusUnpacking(status *corev1beta1.CatalogStatus, result *source.Result) { + status.ResolvedSource = nil + status.Phase = corev1beta1.PhaseUnpacking + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, + Status: metav1.ConditionFalse, + Reason: corev1beta1.ReasonUnpacking, + Message: result.Message, + }) } -// updateStatusReady will update the Catalog.Status.Conditions -// to have the "Ready" condition with a status of "True" and a Reason -// of "ContentsAvailable". 
This function is used to signal that a Catalog -// has been successfully unpacked and all catalog contents are available on cluster -func updateStatusReady(catalog *corev1beta1.Catalog) { - meta.SetStatusCondition(&catalog.Status.Conditions, metav1.Condition{ - Type: corev1beta1.TypeReady, - Reason: corev1beta1.ReasonContentsAvailable, +func updateStatusUnpacked(status *corev1beta1.CatalogStatus, result *source.Result) { + status.ResolvedSource = result.ResolvedSource + status.Phase = corev1beta1.PhaseUnpacked + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, Status: metav1.ConditionTrue, - Message: "catalog contents have been unpacked and are available on cluster", + Reason: corev1beta1.ReasonUnpackSuccessful, + Message: result.Message, }) } -// updateStatusError will update the Catalog.Status.Conditions -// to have the condition Type "Ready" with a Status of "False" and a Reason -// of "UnpackError". This function is used to signal that a Catalog -// is in an error state and that catalog contents are not available on cluster -func updateStatusError(catalog *corev1beta1.Catalog, err error) { - meta.SetStatusCondition(&catalog.Status.Conditions, metav1.Condition{ - Type: corev1beta1.TypeReady, +func updateStatusUnpackFailing(status *corev1beta1.CatalogStatus, err error) error { + status.ResolvedSource = nil + status.Phase = corev1beta1.PhaseFailing + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ + Type: corev1beta1.TypeUnpacked, Status: metav1.ConditionFalse, - Reason: corev1beta1.ReasonUnpackError, + Reason: corev1beta1.ReasonUnpackFailed, Message: err.Error(), }) + return err } // createBundleMetadata will create a `BundleMetadata` resource for each @@ -260,7 +231,9 @@ func (r *CatalogReconciler) createBundleMetadata(ctx context.Context, declCfg *d }) } - ctrlutil.SetOwnerReference(catalog, &bundleMeta, r.Scheme) + if err := ctrlutil.SetOwnerReference(catalog, &bundleMeta, r.Client.Scheme()); err != 
nil { + return fmt.Errorf("setting ownerreference on bundlemetadata %q: %w", bundleMeta.Name, err) + } if err := r.Client.Create(ctx, &bundleMeta); err != nil { return fmt.Errorf("creating bundlemetadata %q: %w", bundleMeta.Name, err) @@ -311,7 +284,9 @@ func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg } } - ctrlutil.SetOwnerReference(catalog, &pack, r.Scheme) + if err := ctrlutil.SetOwnerReference(catalog, &pack, r.Client.Scheme()); err != nil { + return fmt.Errorf("setting ownerreference on package %q: %w", pack.Name, err) + } if err := r.Client.Create(ctx, &pack); err != nil { return fmt.Errorf("creating package %q: %w", pack.Name, err) @@ -319,120 +294,3 @@ func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg } return nil } - -// createUnpackJob creates an unpack Job for the given Catalog -func (r *CatalogReconciler) createUnpackJob(ctx context.Context, cs *corev1beta1.Catalog) error { - job := r.unpackJob(cs) - - ctrlutil.SetOwnerReference(cs, job, r.Scheme) - - if err := r.Client.Create(ctx, job); err != nil { - return fmt.Errorf("creating unpackJob: %w", err) - } - - return nil -} - -// parseUnpackLogs parses the Pod logs from the Pod created by the -// provided unpack Job into a `declcfg.DeclarativeConfig` object -func (r *CatalogReconciler) parseUnpackLogs(ctx context.Context, job *batchv1.Job) (*declcfg.DeclarativeConfig, error) { - clientset, err := kubernetes.NewForConfig(r.Cfg) - if err != nil { - return nil, fmt.Errorf("creating clientset: %w", err) - } - - podsForJob := &v1.PodList{} - err = r.Client.List(ctx, podsForJob, client.MatchingLabels{"job-name": job.GetName()}) - if err != nil { - return nil, fmt.Errorf("listing pods: %w", err) - } - - if len(podsForJob.Items) <= 0 { - return nil, fmt.Errorf("no pods for job") - } - pod := podsForJob.Items[0] - - // TODO: Should we remove this check since we verify the Job has completed before calling this making this redundant? 
- if pod.Status.Phase != v1.PodSucceeded { - return nil, fmt.Errorf("job pod in phase %q, expected %q", pod.Status.Phase, v1.PodSucceeded) - } - - req := clientset.CoreV1().Pods(job.Namespace).GetLogs(pod.GetName(), &v1.PodLogOptions{}) - podLogs, err := req.Stream(ctx) - if err != nil { - return nil, fmt.Errorf("streaming pod logs: %w", err) - } - defer podLogs.Close() - - logs, err := io.ReadAll(podLogs) - if err != nil { - return nil, fmt.Errorf("reading pod logs: %w", err) - } - - return declcfg.LoadReader(bytes.NewReader(logs)) -} - -// unpackJob creates the manifest for an unpack Job given a Catalog -func (r *CatalogReconciler) unpackJob(cs *corev1beta1.Catalog) *batchv1.Job { - opmVol := "opm" - mountPath := "opmvol/" - return &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "catalogd-system", - Name: fmt.Sprintf("%s-image-unpack", cs.Name), - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "catalogd-system", - Name: fmt.Sprintf("%s-image-unpack-pod", cs.Name), - }, - Spec: v1.PodSpec{ - RestartPolicy: v1.RestartPolicyOnFailure, - InitContainers: []v1.Container{ - { - Image: r.OpmImage, - Name: "initializer", - Command: []string{ - "cp", - "/bin/opm", - filepath.Join(mountPath, "opm"), - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: opmVol, - MountPath: mountPath, - }, - }, - }, - }, - Containers: []v1.Container{ - { - Image: cs.Spec.Image, - Name: "unpacker", - Command: []string{ - filepath.Join(mountPath, "opm"), - "render", - "/configs/", - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: opmVol, - MountPath: mountPath, - }, - }, - }, - }, - Volumes: []v1.Volume{ - { - Name: opmVol, - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - }, - }, - }, - }, - } -} diff --git a/pkg/controllers/core/catalog_controller_test.go b/pkg/controllers/core/catalog_controller_test.go new file mode 100644 index 00000000..0776853c --- /dev/null +++ 
b/pkg/controllers/core/catalog_controller_test.go @@ -0,0 +1,358 @@ +package core_test + +import ( + "context" + "errors" + "fmt" + "os" + "testing/fstest" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/operator-framework/catalogd/internal/source" + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" + "github.com/operator-framework/catalogd/pkg/controllers/core" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var _ source.Unpacker = &MockSource{} + +// MockSource is a utility for mocking out an Unpacker source +type MockSource struct { + // result is the result that should be returned when MockSource.Unpack is called + result *source.Result + + // shouldError determines whether or not the MockSource should return an error when MockSource.Unpack is called + shouldError bool +} + +func (ms *MockSource) Unpack(ctx context.Context, catalog *catalogdv1beta1.Catalog) (*source.Result, error) { + if ms.shouldError { + return nil, errors.New("mocksource error") + } + + return ms.result, nil +} + +var _ = Describe("Catalogd Controller Test", func() { + var ( + ctx context.Context + reconciler *core.CatalogReconciler + mockSource *MockSource + ) + BeforeEach(func() { + ctx = context.Background() + mockSource = &MockSource{} + reconciler = &core.CatalogReconciler{ + Client: cl, + Unpacker: source.NewUnpacker( + map[catalogdv1beta1.SourceType]source.Unpacker{ + catalogdv1beta1.SourceTypeImage: mockSource, + }, + ), + } + }) + + When("the catalog does not exist", func() { + It("returns no error", func() { + res, err := reconciler.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{Name: "non-existent"}}) + Expect(res).To(Equal(ctrl.Result{})) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + 
When("setting up with controller manager", func() { + var mgr ctrl.Manager + BeforeEach(func() { + var err error + mgr, err = ctrl.NewManager(cfg, manager.Options{Scheme: sch}) + Expect(mgr).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + }) + It("returns no error", func() { + Expect(reconciler.SetupWithManager(mgr)).To(Succeed()) + }) + }) + + When("the catalog exists", func() { + var ( + catalog *catalogdv1beta1.Catalog + cKey types.NamespacedName + ) + BeforeEach(func() { + cKey = types.NamespacedName{Name: fmt.Sprintf("catalogd-test-%s", rand.String(8))} + }) + + When("the catalog specifies an invalid source", func() { + BeforeEach(func() { + By("initializing cluster state") + catalog = &catalogdv1beta1.Catalog{ + ObjectMeta: metav1.ObjectMeta{Name: cKey.Name}, + Spec: catalogdv1beta1.CatalogSpec{ + Source: catalogdv1beta1.CatalogSource{ + Type: "invalid-source", + }, + }, + } + Expect(cl.Create(ctx, catalog)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + By("tearing down cluster state") + Expect(cl.Delete(ctx, catalog)).NotTo(HaveOccurred()) + }) + + It("should set unpacking status to failed and return an error", func() { + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: cKey}) + Expect(res).To(Equal(ctrl.Result{})) + Expect(err).To(HaveOccurred()) + + // get the catalog and ensure status is set properly + cat := &catalogdv1beta1.Catalog{} + Expect(cl.Get(ctx, cKey, cat)).To(Succeed()) + Expect(cat.Status.ResolvedSource).To(BeNil()) + Expect(cat.Status.Phase).To(Equal(catalogdv1beta1.PhaseFailing)) + cond := meta.FindStatusCondition(cat.Status.Conditions, catalogdv1beta1.TypeUnpacked) + Expect(cond).ToNot(BeNil()) + Expect(cond.Reason).To(Equal(catalogdv1beta1.ReasonUnpackFailed)) + Expect(cond.Status).To(Equal(metav1.ConditionFalse)) + }) + }) + + When("the catalog specifies a valid source", func() { + BeforeEach(func() { + By("initializing cluster state") + catalog = &catalogdv1beta1.Catalog{ + ObjectMeta: 
metav1.ObjectMeta{Name: cKey.Name}, + Spec: catalogdv1beta1.CatalogSpec{ + Source: catalogdv1beta1.CatalogSource{ + Type: "image", + Image: &catalogdv1beta1.ImageSource{ + Ref: "somecatalog:latest", + }, + }, + }, + } + Expect(cl.Create(ctx, catalog)).To(Succeed()) + }) + + AfterEach(func() { + By("tearing down cluster state") + Expect(cl.Delete(ctx, catalog)).NotTo(HaveOccurred()) + }) + + When("unpacker returns source.Result with state == 'Pending'", func() { + BeforeEach(func() { + mockSource.shouldError = false + mockSource.result = &source.Result{State: source.StatePending} + }) + + It("should update status to reflect the pending state", func() { + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: cKey}) + Expect(res).To(Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + + // get the catalog and ensure status is set properly + cat := &catalogdv1beta1.Catalog{} + Expect(cl.Get(ctx, cKey, cat)).To(Succeed()) + Expect(cat.Status.ResolvedSource).To(BeNil()) + Expect(cat.Status.Phase).To(Equal(catalogdv1beta1.PhasePending)) + cond := meta.FindStatusCondition(cat.Status.Conditions, catalogdv1beta1.TypeUnpacked) + Expect(cond).ToNot(BeNil()) + Expect(cond.Reason).To(Equal(catalogdv1beta1.ReasonUnpackPending)) + Expect(cond.Status).To(Equal(metav1.ConditionFalse)) + }) + }) + + When("unpacker returns source.Result with state == 'Unpacking'", func() { + BeforeEach(func() { + mockSource.shouldError = false + mockSource.result = &source.Result{State: source.StateUnpacking} + }) + + It("should update status to reflect the unpacking state", func() { + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: cKey}) + Expect(res).To(Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + + // get the catalog and ensure status is set properly + cat := &catalogdv1beta1.Catalog{} + Expect(cl.Get(ctx, cKey, cat)).To(Succeed()) + Expect(cat.Status.ResolvedSource).To(BeNil()) + 
Expect(cat.Status.Phase).To(Equal(catalogdv1beta1.PhaseUnpacking)) + cond := meta.FindStatusCondition(cat.Status.Conditions, catalogdv1beta1.TypeUnpacked) + Expect(cond).ToNot(BeNil()) + Expect(cond.Reason).To(Equal(catalogdv1beta1.ReasonUnpacking)) + Expect(cond.Status).To(Equal(metav1.ConditionFalse)) + }) + }) + + When("unpacker returns source.Result with unknown state", func() { + BeforeEach(func() { + mockSource.shouldError = false + mockSource.result = &source.Result{State: "unknown"} + }) + + It("should set unpacking status to failed and return an error", func() { + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: cKey}) + Expect(res).To(Equal(ctrl.Result{})) + Expect(err).To(HaveOccurred()) + + // get the catalog and ensure status is set properly + cat := &catalogdv1beta1.Catalog{} + Expect(cl.Get(ctx, cKey, cat)).To(Succeed()) + Expect(cat.Status.ResolvedSource).To(BeNil()) + Expect(cat.Status.Phase).To(Equal(catalogdv1beta1.PhaseFailing)) + cond := meta.FindStatusCondition(cat.Status.Conditions, catalogdv1beta1.TypeUnpacked) + Expect(cond).ToNot(BeNil()) + Expect(cond.Reason).To(Equal(catalogdv1beta1.ReasonUnpackFailed)) + Expect(cond.Status).To(Equal(metav1.ConditionFalse)) + }) + }) + + When("unpacker returns source.Result with state == 'Unpacked'", func() { + var ( + testBundleName = "webhook-operator.v0.0.1" + testBundleImage = "quay.io/olmtest/webhook-operator-bundle:0.0.3" + testBundleRelatedImageName = "test" + testBundleRelatedImageImage = "testimage:latest" + testBundleObjectData = "dW5pbXBvcnRhbnQK" + testPackageDefaultChannel = "preview" + testPackageName = "webhook-operator" + testChannelName = "preview" + testPackage = fmt.Sprintf(testPackageTemplate, testPackageDefaultChannel, testPackageName) + testBundle = fmt.Sprintf(testBundleTemplate, testBundleImage, testBundleName, testPackageName, testBundleRelatedImageName, testBundleRelatedImageImage, testBundleObjectData) + testChannel = fmt.Sprintf(testChannelTemplate, 
testPackageName, testChannelName, testBundleName) + ) + BeforeEach(func() { + filesys := &fstest.MapFS{ + "bundle.yaml": &fstest.MapFile{Data: []byte(testBundle), Mode: os.ModePerm}, + "package.yaml": &fstest.MapFile{Data: []byte(testPackage), Mode: os.ModePerm}, + "channel.yaml": &fstest.MapFile{Data: []byte(testChannel), Mode: os.ModePerm}, + } + + mockSource.shouldError = false + mockSource.result = &source.Result{ + ResolvedSource: &catalog.Spec.Source, + State: source.StateUnpacked, + FS: filesys, + } + + // reconcile + res, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: cKey}) + Expect(res).To(Equal(ctrl.Result{})) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + // clean up package + pkg := &catalogdv1beta1.Package{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPackageName, + }, + } + Expect(cl.Delete(ctx, pkg)).NotTo(HaveOccurred()) + + // clean up bundlemetadata + bm := &catalogdv1beta1.BundleMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: testBundleName, + }, + } + Expect(cl.Delete(ctx, bm)).NotTo(HaveOccurred()) + }) + + It("should set unpacking status to 'unpacked'", func() { + // get the catalog and ensure status is set properly + cat := &catalogdv1beta1.Catalog{} + Expect(cl.Get(ctx, cKey, cat)).ToNot(HaveOccurred()) + Expect(cat.Status.ResolvedSource).ToNot(BeNil()) + Expect(cat.Status.Phase).To(Equal(catalogdv1beta1.PhaseUnpacked)) + cond := meta.FindStatusCondition(cat.Status.Conditions, catalogdv1beta1.TypeUnpacked) + Expect(cond).ToNot(BeNil()) + Expect(cond.Reason).To(Equal(catalogdv1beta1.ReasonUnpackSuccessful)) + Expect(cond.Status).To(Equal(metav1.ConditionTrue)) + }) + + It("should create BundleMetadata resources", func() { + // validate bundlemetadata resources + bundlemetadatas := &catalogdv1beta1.BundleMetadataList{} + Expect(cl.List(ctx, bundlemetadatas)).To(Succeed()) + Expect(bundlemetadatas.Items).To(HaveLen(1)) + bundlemetadata := bundlemetadatas.Items[0] + 
Expect(bundlemetadata.Name).To(Equal(testBundleName))
+					Expect(bundlemetadata.Spec.Image).To(Equal(testBundleImage))
+					Expect(bundlemetadata.Spec.CatalogSource).To(Equal(catalog.Name))
+					Expect(bundlemetadata.Spec.Package).To(Equal(testPackageName))
+					Expect(bundlemetadata.Spec.RelatedImages).To(HaveLen(1))
+					Expect(bundlemetadata.Spec.RelatedImages[0].Name).To(Equal(testBundleRelatedImageName))
+					Expect(bundlemetadata.Spec.RelatedImages[0].Image).To(Equal(testBundleRelatedImageImage))
+					Expect(bundlemetadata.Spec.Properties).To(HaveLen(1))
+				})
+
+				It("should create Package resources", func() {
+					// validate package resources
+					packages := &catalogdv1beta1.PackageList{}
+					Expect(cl.List(ctx, packages)).To(Succeed())
+					Expect(packages.Items).To(HaveLen(1))
+					pack := packages.Items[0]
+					Expect(pack.Name).To(Equal(testPackageName))
+					Expect(pack.Spec.DefaultChannel).To(Equal(testPackageDefaultChannel))
+					Expect(pack.Spec.CatalogSource).To(Equal(catalog.Name))
+					Expect(pack.Spec.Channels).To(HaveLen(1))
+					Expect(pack.Spec.Channels[0].Name).To(Equal(testChannelName))
+					Expect(pack.Spec.Channels[0].Entries).To(HaveLen(1))
+					Expect(pack.Spec.Channels[0].Entries[0].Name).To(Equal(testBundleName))
+				})
+			})
+		})
+
+	})
+})
+
+// The below string templates each represent a YAML file consisting
+// of file-based catalog objects to build a minimal catalog consisting of
+// one package, with one channel, and one bundle in that channel.
+// To learn more about File-Based Catalogs and the different objects, view the
+// documentation at https://olm.operatorframework.io/docs/reference/file-based-catalogs/.
+// The reasoning behind having these as a template is to parameterize different
+// fields to use custom values during testing and verifying to ensure that the BundleMetadata
+// and Package resources created by the Catalog controller have the appropriate values.
+// Having the parameterized fields allows us to easily change the values that are used in +// the tests by changing them in one place as opposed to manually changing many string literals +// throughout the code. +const testBundleTemplate = `--- +image: %s +name: %s +schema: olm.bundle +package: %s +relatedImages: + - name: %s + image: %s +properties: + - type: olm.bundle.object + value: + data: %s + - type: some.other + value: + data: arbitrary-info +` + +const testPackageTemplate = `--- +defaultChannel: %s +name: %s +schema: olm.package +` + +const testChannelTemplate = `--- +schema: olm.channel +package: %s +name: %s +entries: + - name: %s +` diff --git a/pkg/controllers/core/suite_test.go b/pkg/controllers/core/suite_test.go new file mode 100644 index 00000000..6ec38f9b --- /dev/null +++ b/pkg/controllers/core/suite_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core_test + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + catalogdv1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// These tests use Ginkgo (BDD-style Go testing framework). 
Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + cfg *rest.Config + cl client.Client + sch *runtime.Scheme + testEnv *envtest.Environment +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + sch = runtime.NewScheme() + Expect(catalogdv1beta1.AddToScheme(sch)).To(Succeed()) + Expect(corev1.AddToScheme(sch)).To(Succeed()) + + cl, err = client.New(cfg, client.Options{Scheme: sch}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + Expect(testEnv.Stop()).To(Succeed()) +})