From 0351d9175acc6c31adf87a68383f955974433ffb Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Fri, 5 May 2023 15:03:05 -0400 Subject: [PATCH 1/7] rename CatalogSource to Catalog Signed-off-by: Bryce Palmer --- cmd/manager/main.go | 2 +- ...talogd.operatorframework.io_catalogs.yaml} | 18 +++--- pkg/apis/core/v1beta1/catalogsource_types.go | 26 ++++----- .../core/v1beta1/zz_generated.deepcopy.go | 38 ++++++------ .../core/catalogsource_controller.go | 58 +++++++++---------- 5 files changed, 71 insertions(+), 71 deletions(-) rename config/crd/bases/{catalogd.operatorframework.io_catalogsources.yaml => catalogd.operatorframework.io_catalogs.yaml} (93%) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index d715166a..9feca941 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -94,7 +94,7 @@ func main() { os.Exit(1) } - if err = (&corecontrollers.CatalogSourceReconciler{ + if err = (&corecontrollers.CatalogReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Cfg: mgr.GetConfig(), diff --git a/config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml b/config/crd/bases/catalogd.operatorframework.io_catalogs.yaml similarity index 93% rename from config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml rename to config/crd/bases/catalogd.operatorframework.io_catalogs.yaml index a376a4c4..e491c793 100644 --- a/config/crd/bases/catalogd.operatorframework.io_catalogsources.yaml +++ b/config/crd/bases/catalogd.operatorframework.io_catalogs.yaml @@ -4,20 +4,20 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.4 - name: catalogsources.catalogd.operatorframework.io + name: catalogs.catalogd.operatorframework.io spec: group: catalogd.operatorframework.io names: - kind: CatalogSource - listKind: CatalogSourceList - plural: catalogsources - singular: catalogsource + kind: Catalog + listKind: CatalogList + plural: catalogs + singular: catalog scope: Cluster versions: - name: v1beta1 
schema: openAPIV3Schema: - description: CatalogSource is the Schema for the catalogsources API + description: Catalog is the Schema for the Catalogs API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -32,7 +32,7 @@ spec: metadata: type: object spec: - description: CatalogSourceSpec defines the desired state of CatalogSource + description: CatalogSpec defines the desired state of Catalog properties: image: description: Image is the Catalog image that contains Operators' metadata @@ -49,10 +49,10 @@ spec: - image type: object status: - description: CatalogSourceStatus defines the observed state of CatalogSource + description: CatalogStatus defines the observed state of Catalog properties: conditions: - description: Conditions store the status conditions of the CatalogSource + description: Conditions store the status conditions of the Catalog instances items: description: "Condition contains details for one aspect of the current diff --git a/pkg/apis/core/v1beta1/catalogsource_types.go b/pkg/apis/core/v1beta1/catalogsource_types.go index 8b4412f0..437d3857 100644 --- a/pkg/apis/core/v1beta1/catalogsource_types.go +++ b/pkg/apis/core/v1beta1/catalogsource_types.go @@ -31,27 +31,27 @@ const ( //+kubebuilder:resource:scope=Cluster //+kubebuilder:subresource:status -// CatalogSource is the Schema for the catalogsources API -type CatalogSource struct { +// Catalog is the Schema for the Catalogs API +type Catalog struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec CatalogSourceSpec `json:"spec,omitempty"` - Status CatalogSourceStatus `json:"status,omitempty"` + Spec CatalogSpec `json:"spec,omitempty"` + Status CatalogStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true -// CatalogSourceList contains a list of CatalogSource -type CatalogSourceList struct { +// CatalogList contains a list of Catalog +type CatalogList struct { metav1.TypeMeta `json:",inline"` 
metav1.ListMeta `json:"metadata,omitempty"` - Items []CatalogSource `json:"items"` + Items []Catalog `json:"items"` } -// CatalogSourceSpec defines the desired state of CatalogSource -type CatalogSourceSpec struct { +// CatalogSpec defines the desired state of Catalog +type CatalogSpec struct { // Image is the Catalog image that contains Operators' metadata in the FBC format // https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs @@ -64,12 +64,12 @@ type CatalogSourceSpec struct { PollingInterval *metav1.Duration `json:"pollingInterval,omitempty"` } -// CatalogSourceStatus defines the observed state of CatalogSource -type CatalogSourceStatus struct { - // Conditions store the status conditions of the CatalogSource instances +// CatalogStatus defines the observed state of Catalog +type CatalogStatus struct { + // Conditions store the status conditions of the Catalog instances Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } func init() { - SchemeBuilder.Register(&CatalogSource{}, &CatalogSourceList{}) + SchemeBuilder.Register(&Catalog{}, &CatalogList{}) } diff --git a/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/pkg/apis/core/v1beta1/zz_generated.deepcopy.go index 3c9835ee..60698fb0 100644 --- a/pkg/apis/core/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/core/v1beta1/zz_generated.deepcopy.go @@ -128,7 +128,7 @@ func (in *BundleMetadataStatus) DeepCopy() *BundleMetadataStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { +func (in *Catalog) DeepCopyInto(out *Catalog) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -136,18 +136,18 @@ func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource. -func (in *CatalogSource) DeepCopy() *CatalogSource { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Catalog. +func (in *Catalog) DeepCopy() *Catalog { if in == nil { return nil } - out := new(CatalogSource) + out := new(Catalog) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CatalogSource) DeepCopyObject() runtime.Object { +func (in *Catalog) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -155,31 +155,31 @@ func (in *CatalogSource) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) { +func (in *CatalogList) DeepCopyInto(out *CatalogList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]CatalogSource, len(*in)) + *out = make([]Catalog, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList. -func (in *CatalogSourceList) DeepCopy() *CatalogSourceList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogList. 
+func (in *CatalogList) DeepCopy() *CatalogList { if in == nil { return nil } - out := new(CatalogSourceList) + out := new(CatalogList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CatalogSourceList) DeepCopyObject() runtime.Object { +func (in *CatalogList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -187,7 +187,7 @@ func (in *CatalogSourceList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { +func (in *CatalogSpec) DeepCopyInto(out *CatalogSpec) { *out = *in if in.PollingInterval != nil { in, out := &in.PollingInterval, &out.PollingInterval @@ -196,18 +196,18 @@ func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec. -func (in *CatalogSourceSpec) DeepCopy() *CatalogSourceSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSpec. +func (in *CatalogSpec) DeepCopy() *CatalogSpec { if in == nil { return nil } - out := new(CatalogSourceSpec) + out := new(CatalogSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { +func (in *CatalogStatus) DeepCopyInto(out *CatalogStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions @@ -218,12 +218,12 @@ func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. 
-func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogStatus. +func (in *CatalogStatus) DeepCopy() *CatalogStatus { if in == nil { return nil } - out := new(CatalogSourceStatus) + out := new(CatalogStatus) in.DeepCopyInto(out) return out } diff --git a/pkg/controllers/core/catalogsource_controller.go b/pkg/controllers/core/catalogsource_controller.go index 00b757a7..cbb7f692 100644 --- a/pkg/controllers/core/catalogsource_controller.go +++ b/pkg/controllers/core/catalogsource_controller.go @@ -45,17 +45,17 @@ import ( corev1beta1 "github.com/operator-framework/catalogd/pkg/apis/core/v1beta1" ) -// CatalogSourceReconciler reconciles a CatalogSource object -type CatalogSourceReconciler struct { +// CatalogReconciler reconciles a Catalog object +type CatalogReconciler struct { client.Client Scheme *runtime.Scheme Cfg *rest.Config OpmImage string } -//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogsources,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogsources/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogsources/finalizers,verbs=update +//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=catalogs/finalizers,verbs=update //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=bundlemetadata,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=bundlemetadata/status,verbs=get;update;patch //+kubebuilder:rbac:groups=catalogd.operatorframework.io,resources=bundlemetadata/finalizers,verbs=update @@ 
-71,11 +71,11 @@ type CatalogSourceReconciler struct { // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile -func (r *CatalogSourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *CatalogReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // TODO: Where and when should we be logging errors and at which level? _ = log.FromContext(ctx).WithName("catalogd-controller") - existingCatsrc := corev1beta1.CatalogSource{} + existingCatsrc := corev1beta1.Catalog{} if err := r.Client.Get(ctx, req.NamespacedName, &existingCatsrc); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -94,7 +94,7 @@ func (r *CatalogSourceReconciler) Reconcile(ctx context.Context, req ctrl.Reques return res, apimacherrors.NewAggregate([]error{reconcileErr, updateErr}) } } - existingCatsrc.Status, reconciledCatsrc.Status = corev1beta1.CatalogSourceStatus{}, corev1beta1.CatalogSourceStatus{} + existingCatsrc.Status, reconciledCatsrc.Status = corev1beta1.CatalogStatus{}, corev1beta1.CatalogStatus{} if !equality.Semantic.DeepEqual(existingCatsrc, reconciledCatsrc) { if updateErr := r.Client.Update(ctx, reconciledCatsrc); updateErr != nil { return res, apimacherrors.NewAggregate([]error{reconcileErr, updateErr}) @@ -104,7 +104,7 @@ func (r *CatalogSourceReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // SetupWithManager sets up the controller with the Manager. -func (r *CatalogSourceReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *CatalogReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). // TODO: Due to us not having proper error handling, // not having this results in the controller getting into @@ -113,11 +113,11 @@ func (r *CatalogSourceReconciler) SetupWithManager(mgr ctrl.Manager) error { // even though they already exist. 
This should be resolved by the fix // for https://github.com/operator-framework/catalogd/issues/6. The fix for // #6 should also remove the usage of `builder.WithPredicates(predicate.GenerationChangedPredicate{})` - For(&corev1beta1.CatalogSource{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + For(&corev1beta1.Catalog{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Complete(r) } -func (r *CatalogSourceReconciler) reconcile(ctx context.Context, catalogSource *corev1beta1.CatalogSource) (ctrl.Result, error) { +func (r *CatalogReconciler) reconcile(ctx context.Context, catalogSource *corev1beta1.Catalog) (ctrl.Result, error) { job, err := r.ensureUnpackJob(ctx, catalogSource) if err != nil { updateStatusError(catalogSource, err) @@ -149,17 +149,17 @@ func (r *CatalogSourceReconciler) reconcile(ctx context.Context, catalogSource * return ctrl.Result{}, err } - // update CatalogSource status as "Ready" since at this point + // update Catalog status as "Ready" since at this point // all catalog content should be available on cluster updateStatusReady(catalogSource) return ctrl.Result{}, nil } // ensureUnpackJob will ensure that an unpack job has been created for the given -// CatalogSource. It will return the unpack job if successful (either the Job already +// Catalog. It will return the unpack job if successful (either the Job already // exists or one was successfully created) or an error if it is unsuccessful -func (r *CatalogSourceReconciler) ensureUnpackJob(ctx context.Context, catalogSource *corev1beta1.CatalogSource) (*batchv1.Job, error) { - // Create the unpack Job manifest for the given CatalogSource +func (r *CatalogReconciler) ensureUnpackJob(ctx context.Context, catalogSource *corev1beta1.Catalog) (*batchv1.Job, error) { + // Create the unpack Job manifest for the given Catalog job := r.unpackJob(catalogSource) // If the Job already exists just return it. 
If it doesn't then attempt to create it @@ -182,7 +182,7 @@ func (r *CatalogSourceReconciler) ensureUnpackJob(ctx context.Context, catalogSo // false if the Job has not completed, or an error if the Job is completed but in a // "Failed", "FailureTarget", or "Suspended" state or an error is encountered // when attempting to check the status of the Job -func (r *CatalogSourceReconciler) checkUnpackJobComplete(ctx context.Context, job *batchv1.Job) (bool, error) { +func (r *CatalogReconciler) checkUnpackJobComplete(ctx context.Context, job *batchv1.Job) (bool, error) { // If the completion time is non-nil that means the Job has completed if job.Status.CompletionTime != nil { // Loop through the conditions and check for any fail conditions @@ -197,11 +197,11 @@ func (r *CatalogSourceReconciler) checkUnpackJobComplete(ctx context.Context, jo return false, nil } -// updateStatusReady will update the CatalogSource.Status.Conditions +// updateStatusReady will update the Catalog.Status.Conditions // to have the "Ready" condition with a status of "True" and a Reason -// of "ContentsAvailable". This function is used to signal that a CatalogSource +// of "ContentsAvailable". This function is used to signal that a Catalog // has been successfully unpacked and all catalog contents are available on cluster -func updateStatusReady(catalogSource *corev1beta1.CatalogSource) { +func updateStatusReady(catalogSource *corev1beta1.Catalog) { meta.SetStatusCondition(&catalogSource.Status.Conditions, metav1.Condition{ Type: corev1beta1.TypeReady, Reason: corev1beta1.ReasonContentsAvailable, @@ -210,11 +210,11 @@ func updateStatusReady(catalogSource *corev1beta1.CatalogSource) { }) } -// updateStatusError will update the CatalogSource.Status.Conditions +// updateStatusError will update the Catalog.Status.Conditions // to have the condition Type "Ready" with a Status of "False" and a Reason -// of "UnpackError". This function is used to signal that a CatalogSource +// of "UnpackError". 
This function is used to signal that a Catalog // is in an error state and that catalog contents are not available on cluster -func updateStatusError(catalogSource *corev1beta1.CatalogSource, err error) { +func updateStatusError(catalogSource *corev1beta1.Catalog, err error) { meta.SetStatusCondition(&catalogSource.Status.Conditions, metav1.Condition{ Type: corev1beta1.TypeReady, Status: metav1.ConditionFalse, @@ -226,7 +226,7 @@ func updateStatusError(catalogSource *corev1beta1.CatalogSource, err error) { // createBundleMetadata will create a `BundleMetadata` resource for each // "olm.bundle" object that exists for the given catalog contents. Returns an // error if any are encountered. -func (r *CatalogSourceReconciler) createBundleMetadata(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalogSource *corev1beta1.CatalogSource) error { +func (r *CatalogReconciler) createBundleMetadata(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalogSource *corev1beta1.Catalog) error { for _, bundle := range declCfg.Bundles { bundleMeta := corev1beta1.BundleMetadata{ ObjectMeta: metav1.ObjectMeta{ @@ -274,7 +274,7 @@ func (r *CatalogSourceReconciler) createBundleMetadata(ctx context.Context, decl // "olm.package" object that exists for the given catalog contents. // `Package.Spec.Channels` is populated by filtering all "olm.channel" objects // where the "packageName" == `Package.Name`. Returns an error if any are encountered. 
-func (r *CatalogSourceReconciler) createPackages(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalogSource *corev1beta1.CatalogSource) error { +func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalogSource *corev1beta1.Catalog) error { for _, pkg := range declCfg.Packages { pack := corev1beta1.Package{ ObjectMeta: metav1.ObjectMeta{ @@ -320,8 +320,8 @@ func (r *CatalogSourceReconciler) createPackages(ctx context.Context, declCfg *d return nil } -// createUnpackJob creates an unpack Job for the given CatalogSource -func (r *CatalogSourceReconciler) createUnpackJob(ctx context.Context, cs *corev1beta1.CatalogSource) error { +// createUnpackJob creates an unpack Job for the given Catalog +func (r *CatalogReconciler) createUnpackJob(ctx context.Context, cs *corev1beta1.Catalog) error { job := r.unpackJob(cs) ctrlutil.SetOwnerReference(cs, job, r.Scheme) @@ -335,7 +335,7 @@ func (r *CatalogSourceReconciler) createUnpackJob(ctx context.Context, cs *corev // parseUnpackLogs parses the Pod logs from the Pod created by the // provided unpack Job into a `declcfg.DeclarativeConfig` object -func (r *CatalogSourceReconciler) parseUnpackLogs(ctx context.Context, job *batchv1.Job) (*declcfg.DeclarativeConfig, error) { +func (r *CatalogReconciler) parseUnpackLogs(ctx context.Context, job *batchv1.Job) (*declcfg.DeclarativeConfig, error) { clientset, err := kubernetes.NewForConfig(r.Cfg) if err != nil { return nil, fmt.Errorf("creating clientset: %w", err) @@ -372,8 +372,8 @@ func (r *CatalogSourceReconciler) parseUnpackLogs(ctx context.Context, job *batc return declcfg.LoadReader(bytes.NewReader(logs)) } -// unpackJob creates the manifest for an unpack Job given a CatalogSource -func (r *CatalogSourceReconciler) unpackJob(cs *corev1beta1.CatalogSource) *batchv1.Job { +// unpackJob creates the manifest for an unpack Job given a Catalog +func (r *CatalogReconciler) unpackJob(cs *corev1beta1.Catalog) *batchv1.Job { 
opmVol := "opm" mountPath := "opmvol/" return &batchv1.Job{ From 7e6cc6807d6ccd5017c80d850a5638c1a480d96d Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Fri, 5 May 2023 15:05:33 -0400 Subject: [PATCH 2/7] make generate Signed-off-by: Bryce Palmer --- config/rbac/role.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5383b7d5..3e8083b7 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -42,7 +42,7 @@ rules: - apiGroups: - catalogd.operatorframework.io resources: - - catalogsources + - catalogs verbs: - create - delete @@ -54,13 +54,13 @@ rules: - apiGroups: - catalogd.operatorframework.io resources: - - catalogsources/finalizers + - catalogs/finalizers verbs: - update - apiGroups: - catalogd.operatorframework.io resources: - - catalogsources/status + - catalogs/status verbs: - get - patch From fb1026dd4a8afce68d2d1d89c8a95298a6e81135 Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Fri, 5 May 2023 15:08:07 -0400 Subject: [PATCH 3/7] fix manifests and rename file Signed-off-by: Bryce Palmer --- config/crd/kustomization.yaml | 2 +- .../core/v1beta1/{catalogsource_types.go => catalog_types.go} | 0 .../core/{catalogsource_controller.go => catalog_controller.go} | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename pkg/apis/core/v1beta1/{catalogsource_types.go => catalog_types.go} (100%) rename pkg/controllers/core/{catalogsource_controller.go => catalog_controller.go} (100%) diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 5d7c1db6..78113776 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -4,5 +4,5 @@ resources: - bases/catalogd.operatorframework.io_bundlemetadata.yaml - bases/catalogd.operatorframework.io_packages.yaml -- bases/catalogd.operatorframework.io_catalogsources.yaml +- bases/catalogd.operatorframework.io_catalogs.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git 
a/pkg/apis/core/v1beta1/catalogsource_types.go b/pkg/apis/core/v1beta1/catalog_types.go similarity index 100% rename from pkg/apis/core/v1beta1/catalogsource_types.go rename to pkg/apis/core/v1beta1/catalog_types.go diff --git a/pkg/controllers/core/catalogsource_controller.go b/pkg/controllers/core/catalog_controller.go similarity index 100% rename from pkg/controllers/core/catalogsource_controller.go rename to pkg/controllers/core/catalog_controller.go From 728b82ccca6e0b025e65d8fd5ad7ee04013c16c6 Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Fri, 5 May 2023 15:10:38 -0400 Subject: [PATCH 4/7] update sample Signed-off-by: Bryce Palmer --- config/samples/core_v1beta1_catalogsource.yaml | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 config/samples/core_v1beta1_catalogsource.yaml diff --git a/config/samples/core_v1beta1_catalogsource.yaml b/config/samples/core_v1beta1_catalogsource.yaml deleted file mode 100644 index b34f3750..00000000 --- a/config/samples/core_v1beta1_catalogsource.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: catalogd.operatorframework.io/v1beta1 -kind: CatalogSource -metadata: - labels: - app.kubernetes.io/name: catalogsource - app.kubernetes.io/instance: catalogsource-sample - app.kubernetes.io/part-of: catalogd - app.kuberentes.io/managed-by: kustomize - app.kubernetes.io/created-by: catalogd - name: catalogsource-sample -spec: - image: quay.io/operatorhubio/catalog:latest - pollingInterval: 45m From 1c0b076f8a6126c5e1bb9d96cd5cd0d6052ad7e7 Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Fri, 5 May 2023 15:10:46 -0400 Subject: [PATCH 5/7] update sample Signed-off-by: Bryce Palmer --- config/samples/core_v1beta1_catalog.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 config/samples/core_v1beta1_catalog.yaml diff --git a/config/samples/core_v1beta1_catalog.yaml b/config/samples/core_v1beta1_catalog.yaml new file mode 100644 index 00000000..98a73e0a --- /dev/null +++ 
b/config/samples/core_v1beta1_catalog.yaml @@ -0,0 +1,13 @@ +apiVersion: catalogd.operatorframework.io/v1beta1 +kind: Catalog +metadata: + labels: + app.kubernetes.io/name: catalog + app.kubernetes.io/instance: catalog-sample + app.kubernetes.io/part-of: catalogd + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: catalogd + name: catalog-sample +spec: + image: quay.io/operatorhubio/catalog:latest + pollingInterval: 45m From e2e4b25d1fa9604b8ce0a74de113e36881371eb4 Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Tue, 9 May 2023 07:13:33 -0400 Subject: [PATCH 6/7] catalogsource --> catalog Signed-off-by: Bryce Palmer --- Makefile | 4 +- README.md | 10 ++-- cmd/manager/main.go | 2 +- ...d.operatorframework.io_bundlemetadata.yaml | 2 +- ...atalogd.operatorframework.io_packages.yaml | 2 +- hack/scripts/generate-asciidemo.sh | 16 +++---- pkg/apis/core/v1beta1/bundlemetadata_types.go | 2 +- pkg/apis/core/v1beta1/package_types.go | 2 +- pkg/controllers/core/catalog_controller.go | 46 +++++++++---------- 9 files changed, 43 insertions(+), 43 deletions(-) diff --git a/Makefile b/Makefile index d09952d3..6f5daaa8 100644 --- a/Makefile +++ b/Makefile @@ -136,12 +136,12 @@ install: docker-build-controller kind-load deploy wait ## Install local catalogd # TODO: When the apiserver is working properly, add this line back after the manager edit: # cd config/apiserver && $(KUSTOMIZE) edit set image apiserver=${SERVER_IMG}:${IMG_TAG} .PHONY: deploy -deploy: kustomize ## Deploy CatalogSource controller and ApiServer to the K8s cluster specified in ~/.kube/config. +deploy: kustomize ## Deploy Catalog controller and ApiServer to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${CONTROLLER_IMG}:${IMG_TAG} $(KUSTOMIZE) build config/default | kubectl apply -f - .PHONY: undeploy -undeploy: kustomize ## Undeploy CatalogSource controller and ApiServer from the K8s cluster specified in ~/.kube/config.
+undeploy: kustomize ## Undeploy Catalog controller and ApiServer from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=true -f - .PHONY: uninstall diff --git a/README.md b/README.md index a39349b9..673bcd59 100644 --- a/README.md +++ b/README.md @@ -14,11 +14,11 @@ $ kind create cluster $ kubectl apply -f https://github.com/operator-framework/catalogd/config/crd/bases/ $ kubectl apply -f https://github.com/operator-framework/catalogd/config/ $ kubectl create ns test -$ kubectl apply -f config/samples/catalogsource.yaml +$ kubectl apply -f config/samples/core_v1beta1_catalog.yaml -$ kubectl get catalogsource -n test +$ kubectl get catalog -n test NAME AGE -catalogsource-sample 98s +catalog-sample 98s $ kubectl get bundlemetadata -n test NAME AGE @@ -109,7 +109,7 @@ kubectl apply -f config/crd/bases/ ```sh kubectl apply -f config/ ``` -- Create the sample CatalogSource (this will trigger the reconciliation loop): +- Create the sample Catalog (this will trigger the reconciliation loop): ```sh -kubectl apply -f config/samples/catalogsource.yaml +kubectl apply -f config/samples/core_v1beta1_catalog.yaml ``` diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 9feca941..3ded2f6e 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -100,7 +100,7 @@ func main() { Cfg: mgr.GetConfig(), OpmImage: opmImage, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "CatalogSource") + setupLog.Error(err, "unable to create controller", "controller", "Catalog") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/catalogd.operatorframework.io_bundlemetadata.yaml b/config/crd/bases/catalogd.operatorframework.io_bundlemetadata.yaml index bdd08509..404333e4 100644 --- a/config/crd/bases/catalogd.operatorframework.io_bundlemetadata.yaml +++ b/config/crd/bases/catalogd.operatorframework.io_bundlemetadata.yaml @@ -35,7 +35,7 @@ spec: 
description: BundleMetadataSpec defines the desired state of BundleMetadata properties: catalogSource: - description: CatalogSource is the name of the CatalogSource that provides + description: CatalogSource is the name of the Catalog that provides this bundle type: string image: diff --git a/config/crd/bases/catalogd.operatorframework.io_packages.yaml b/config/crd/bases/catalogd.operatorframework.io_packages.yaml index e1d71054..d4a0ce19 100644 --- a/config/crd/bases/catalogd.operatorframework.io_packages.yaml +++ b/config/crd/bases/catalogd.operatorframework.io_packages.yaml @@ -35,7 +35,7 @@ spec: description: PackageSpec defines the desired state of Package properties: catalogSource: - description: CatalogSource is the name of the CatalogSource this package + description: CatalogSource is the name of the Catalog this package belongs to type: string catalogSourceDisplayName: diff --git a/hack/scripts/generate-asciidemo.sh b/hack/scripts/generate-asciidemo.sh index 9f62841e..46d6b2ed 100755 --- a/hack/scripts/generate-asciidemo.sh +++ b/hack/scripts/generate-asciidemo.sh @@ -20,18 +20,18 @@ function run() { sleep 10 typeline "make install" sleep 10 - # inspect crds (catalogsource, package, bundlemetadata) - #k get crds catalogsources.catalogd.operatorframework.io + # inspect crds (catalog, package, bundlemetadata) + #k get crds catalogs.catalogd.operatorframework.io #k get crds packages.catalogd.operatorframework.io #k get crds bundlemetadata.catalogd.operatorframework.io - #typeline 'kubectl get crds -A| grep -A10 -B10 -E "catalogsources|packages|bundlemetadata"' + #typeline 'kubectl get crds -A| grep -A10 -B10 -E "catalogs|packages|bundlemetadata"' typeline 'kubectl get crds -A' - typeline -x "# create a catalogsource" - typeline "kubectl apply -f config/samples/core_v1beta1_catalogsource.yaml" # or other - typeline "kubectl get catalogsource -A" # shows catalogsource-sample - typeline -x "# waiting for catalogsource to report ready status" - typeline "kubectl 
wait --for=condition=Ready catalogsource/catalogsource-sample --timeout=1h" + typeline -x "# create a catalog" + typeline "kubectl apply -f config/samples/core_v1beta1_catalog.yaml" # or other + typeline "kubectl get catalog -A" # shows catalog-sample + typeline -x "# waiting for catalog to report ready status" + typeline "kubectl wait --for=condition=Ready catalog/catalog-sample --timeout=1h" # inspect packages, and then details on one package CR typeline -x "# check what 'packages' are available in this catalog and then inspect the content of one of the packages" typeline "kubectl get packages" diff --git a/pkg/apis/core/v1beta1/bundlemetadata_types.go b/pkg/apis/core/v1beta1/bundlemetadata_types.go index 9e041d47..c1071e81 100644 --- a/pkg/apis/core/v1beta1/bundlemetadata_types.go +++ b/pkg/apis/core/v1beta1/bundlemetadata_types.go @@ -45,7 +45,7 @@ type BundleMetadataList struct { // BundleMetadataSpec defines the desired state of BundleMetadata type BundleMetadataSpec struct { - // CatalogSource is the name of the CatalogSource that provides this bundle + // CatalogSource is the name of the Catalog that provides this bundle CatalogSource string `json:"catalogSource"` // Package is the name of the package that provides this bundle diff --git a/pkg/apis/core/v1beta1/package_types.go b/pkg/apis/core/v1beta1/package_types.go index fa5edf76..8c0cd051 100644 --- a/pkg/apis/core/v1beta1/package_types.go +++ b/pkg/apis/core/v1beta1/package_types.go @@ -44,7 +44,7 @@ type PackageList struct { // PackageSpec defines the desired state of Package type PackageSpec struct { - // CatalogSource is the name of the CatalogSource this package belongs to + // CatalogSource is the name of the Catalog this package belongs to CatalogSource string `json:"catalogSource"` CatalogSourceDisplayName string `json:"catalogSourceDisplayName,omitempty"` CatalogSourcePublisher string `json:"catalogSourcePublisher,omitempty"` diff --git a/pkg/controllers/core/catalog_controller.go 
b/pkg/controllers/core/catalog_controller.go index cbb7f692..13715c57 100644 --- a/pkg/controllers/core/catalog_controller.go +++ b/pkg/controllers/core/catalog_controller.go @@ -117,16 +117,16 @@ func (r *CatalogReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *CatalogReconciler) reconcile(ctx context.Context, catalogSource *corev1beta1.Catalog) (ctrl.Result, error) { - job, err := r.ensureUnpackJob(ctx, catalogSource) +func (r *CatalogReconciler) reconcile(ctx context.Context, catalog *corev1beta1.Catalog) (ctrl.Result, error) { + job, err := r.ensureUnpackJob(ctx, catalog) if err != nil { - updateStatusError(catalogSource, err) + updateStatusError(catalog, err) return ctrl.Result{}, fmt.Errorf("ensuring unpack job: %v", err) } complete, err := r.checkUnpackJobComplete(ctx, job) if err != nil { - updateStatusError(catalogSource, err) + updateStatusError(catalog, err) return ctrl.Result{}, fmt.Errorf("ensuring unpack job completed: %v", err) } if !complete { @@ -135,38 +135,38 @@ func (r *CatalogReconciler) reconcile(ctx context.Context, catalogSource *corev1 declCfg, err := r.parseUnpackLogs(ctx, job) if err != nil { - updateStatusError(catalogSource, err) + updateStatusError(catalog, err) return ctrl.Result{}, err } - if err := r.createPackages(ctx, declCfg, catalogSource); err != nil { - updateStatusError(catalogSource, err) + if err := r.createPackages(ctx, declCfg, catalog); err != nil { + updateStatusError(catalog, err) return ctrl.Result{}, err } - if err := r.createBundleMetadata(ctx, declCfg, catalogSource); err != nil { - updateStatusError(catalogSource, err) + if err := r.createBundleMetadata(ctx, declCfg, catalog); err != nil { + updateStatusError(catalog, err) return ctrl.Result{}, err } // update Catalog status as "Ready" since at this point // all catalog content should be available on cluster - updateStatusReady(catalogSource) + updateStatusReady(catalog) return ctrl.Result{}, nil } // ensureUnpackJob will ensure that 
an unpack job has been created for the given // Catalog. It will return the unpack job if successful (either the Job already // exists or one was successfully created) or an error if it is unsuccessful -func (r *CatalogReconciler) ensureUnpackJob(ctx context.Context, catalogSource *corev1beta1.Catalog) (*batchv1.Job, error) { +func (r *CatalogReconciler) ensureUnpackJob(ctx context.Context, catalog *corev1beta1.Catalog) (*batchv1.Job, error) { // Create the unpack Job manifest for the given Catalog - job := r.unpackJob(catalogSource) + job := r.unpackJob(catalog) // If the Job already exists just return it. If it doesn't then attempt to create it err := r.Client.Get(ctx, client.ObjectKeyFromObject(job), job) if err != nil { if errors.IsNotFound(err) { - if err = r.createUnpackJob(ctx, catalogSource); err != nil { + if err = r.createUnpackJob(ctx, catalog); err != nil { return nil, err } return job, nil @@ -201,8 +201,8 @@ func (r *CatalogReconciler) checkUnpackJobComplete(ctx context.Context, job *bat // to have the "Ready" condition with a status of "True" and a Reason // of "ContentsAvailable". This function is used to signal that a Catalog // has been successfully unpacked and all catalog contents are available on cluster -func updateStatusReady(catalogSource *corev1beta1.Catalog) { - meta.SetStatusCondition(&catalogSource.Status.Conditions, metav1.Condition{ +func updateStatusReady(catalog *corev1beta1.Catalog) { + meta.SetStatusCondition(&catalog.Status.Conditions, metav1.Condition{ Type: corev1beta1.TypeReady, Reason: corev1beta1.ReasonContentsAvailable, Status: metav1.ConditionTrue, @@ -214,8 +214,8 @@ func updateStatusReady(catalogSource *corev1beta1.Catalog) { // to have the condition Type "Ready" with a Status of "False" and a Reason // of "UnpackError". 
This function is used to signal that a Catalog // is in an error state and that catalog contents are not available on cluster -func updateStatusError(catalogSource *corev1beta1.Catalog, err error) { - meta.SetStatusCondition(&catalogSource.Status.Conditions, metav1.Condition{ +func updateStatusError(catalog *corev1beta1.Catalog, err error) { + meta.SetStatusCondition(&catalog.Status.Conditions, metav1.Condition{ Type: corev1beta1.TypeReady, Status: metav1.ConditionFalse, Reason: corev1beta1.ReasonUnpackError, @@ -226,14 +226,14 @@ func updateStatusError(catalogSource *corev1beta1.Catalog, err error) { // createBundleMetadata will create a `BundleMetadata` resource for each // "olm.bundle" object that exists for the given catalog contents. Returns an // error if any are encountered. -func (r *CatalogReconciler) createBundleMetadata(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalogSource *corev1beta1.Catalog) error { +func (r *CatalogReconciler) createBundleMetadata(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalog *corev1beta1.Catalog) error { for _, bundle := range declCfg.Bundles { bundleMeta := corev1beta1.BundleMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: bundle.Name, }, Spec: corev1beta1.BundleMetadataSpec{ - CatalogSource: catalogSource.Name, + CatalogSource: catalog.Name, Package: bundle.Package, Image: bundle.Image, Properties: []corev1beta1.Property{}, @@ -260,7 +260,7 @@ func (r *CatalogReconciler) createBundleMetadata(ctx context.Context, declCfg *d }) } - ctrlutil.SetOwnerReference(catalogSource, &bundleMeta, r.Scheme) + ctrlutil.SetOwnerReference(catalog, &bundleMeta, r.Scheme) if err := r.Client.Create(ctx, &bundleMeta); err != nil { return fmt.Errorf("creating bundlemetadata %q: %w", bundleMeta.Name, err) @@ -274,7 +274,7 @@ func (r *CatalogReconciler) createBundleMetadata(ctx context.Context, declCfg *d // "olm.package" object that exists for the given catalog contents. 
// `Package.Spec.Channels` is populated by filtering all "olm.channel" objects // where the "packageName" == `Package.Name`. Returns an error if any are encountered. -func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalogSource *corev1beta1.Catalog) error { +func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg.DeclarativeConfig, catalog *corev1beta1.Catalog) error { for _, pkg := range declCfg.Packages { pack := corev1beta1.Package{ ObjectMeta: metav1.ObjectMeta{ @@ -286,7 +286,7 @@ func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg Name: pkg.Name, }, Spec: corev1beta1.PackageSpec{ - CatalogSource: catalogSource.Name, + CatalogSource: catalog.Name, DefaultChannel: pkg.DefaultChannel, Channels: []corev1beta1.PackageChannel{}, Description: pkg.Description, @@ -311,7 +311,7 @@ func (r *CatalogReconciler) createPackages(ctx context.Context, declCfg *declcfg } } - ctrlutil.SetOwnerReference(catalogSource, &pack, r.Scheme) + ctrlutil.SetOwnerReference(catalog, &pack, r.Scheme) if err := r.Client.Create(ctx, &pack); err != nil { return fmt.Errorf("creating package %q: %w", pack.Name, err) From 654eb8ff2c569e7c30b24547484d49a77ab374fd Mon Sep 17 00:00:00 2001 From: Bryce Palmer Date: Mon, 15 May 2023 09:32:49 -0400 Subject: [PATCH 7/7] update pprof readme references Signed-off-by: Bryce Palmer --- pprof/README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pprof/README.md b/pprof/README.md index 88d76435..307a30f9 100644 --- a/pprof/README.md +++ b/pprof/README.md @@ -1,6 +1,6 @@ ## pprof -This folder contains some profiles that can be read using [pprof](https://github.com/google/pprof) to show how the core kubernetes apiserver and the custom catalogd apiserver CPU & Memory utilization is affected by the creation and reconciliation of the sample `CatalogSource` CR found at `../config/samples/catalogsource.yaml`. 
+This folder contains some profiles that can be read using [pprof](https://github.com/google/pprof) to show how the core kubernetes apiserver and the custom catalogd apiserver CPU & Memory utilization is affected by the creation and reconciliation of the sample `Catalog` CR found at `../config/samples/core_v1beta1_catalog.yaml`. Instead of providing static screenshots and losing the interactivity associated with these `pprof` profiles, each of the files with the extension `.pb` can be used to view the profiles that were the result of running `pprof` against the live processes. @@ -18,12 +18,12 @@ Here is a brief breakdown of what information is provided in each profile file i - `kubeapiserver_heap_profile.pb` - This is the Memory utilization of the core kube-apiserver - `catalogd_apiserver_cpu_profile.pb` - This is the CPU utilization of the custom catalogd apiserver - `catalogd_apiserver_heap_profile.pb` - This is the Memory utilization of the custom catalogd apiserver -- `manager_cpu_profile.pb` - This is the CPU utilization of the CatalogSource controller (and other controllers associated with this manager). -- `manager_heap_profile.pb` - This is the Memory utilization of the CatalogSource controller (and other controllers associated with this manager). +- `manager_cpu_profile.pb` - This is the CPU utilization of the Catalog controller (and other controllers associated with this manager). +- `manager_heap_profile.pb` - This is the Memory utilization of the Catalog controller (and other controllers associated with this manager). - `kubeapiserver_alone_cpu_profile.pb` - This is the CPU utilization for the core kube-apiserver without running our custom apiserver - `kubeapiserver_alone_heap_profile.pb` - This is the Memory utilization for the core kube-apiserver without running our custom apiserver -> **NOTE**: All profiles were collected ASAP after all child resources were created from the reconciliation of the sample `CatalogSource` CR. 
+> **NOTE**: All profiles were collected ASAP after all child resources were created from the reconciliation of the sample `Catalog` CR. ## Pprof Breakdown @@ -81,7 +81,7 @@ This section is being added as the pprof metrics don't necessarily show the whol This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command: ``` -kubectl apply -f config/samples/catalogsource.yaml +kubectl apply -f config/samples/core_v1beta1_catalog.yaml ``` was run right at 1:44 PM. @@ -90,7 +90,7 @@ The CPU spike lasted ~3 minutes and the values were: - 1:45PM (PEAK) - 0.223 cores - 1:47PM - 0.078 cores -With this, we can see that without the catalogd apiserver the core kube-apiserver had a CPU utilization spike of 0.156 cores and then settled at ~0.011 cores above what the utilization was prior to the reconciliation of the sample `CatalogSource` CR. +With this, we can see that without the catalogd apiserver the core kube-apiserver had a CPU utilization spike of 0.156 cores and then settled at ~0.011 cores above what the utilization was prior to the reconciliation of the sample `Catalog` CR. The memory consumption increased over the span of ~3 minutes and then stabilized. The values were: - 1:44PM - 289Mi @@ -108,7 +108,7 @@ With this, we can see that without the catalogd apiserver the core kube-apiserve This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command: ``` -kubectl apply -f config/samples/catalogsource.yaml +kubectl apply -f config/samples/core_v1beta1_catalog.yaml ``` was run right at 3:06 PM @@ -118,7 +118,7 @@ The CPU spike lasted ~3 minutes and the values were: - 3:08 PM (PEAK) - 0.215 cores - 3:09 PM - 0.091 cores -With this, we can see that with the catalogd apiserver the core kube-apiserver had a CPU utilization spike of 0.125 cores and then settled at ~0.001 cores above what the utilization was prior to the reconciliation of the sample `CatalogSource` CR. 
+With this, we can see that with the catalogd apiserver the core kube-apiserver had a CPU utilization spike of 0.125 cores and then settled at ~0.001 cores above what the utilization was prior to the reconciliation of the sample `Catalog` CR. The memory consumption increased over the span of ~3 minutes and then stabilized. The values were: - 3:06PM - 337Mi @@ -135,7 +135,7 @@ With this, we can see that with the catalogd apiserver the core kube-apiserver h This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command: ``` -kubectl apply -f config/samples/catalogsource.yaml +kubectl apply -f config/samples/core_v1beta1_catalog.yaml ``` was run right at 3:06 PM @@ -170,7 +170,7 @@ Overall, when running both the kube-apiserver and the catalogd apiserver the tot This image shows the spike in CPU utilization and the increase in Memory consumption. In this scenario, the command: ``` -kubectl apply -f config/samples/catalogsource.yaml +kubectl apply -f config/samples/core_v1beta1_catalog.yaml ``` was run right at 3:06 PM