From de9cfd9ea139dad1cea615c6a504a7452b848493 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Mon, 6 May 2024 16:46:56 -0700 Subject: [PATCH] WIP: Use a single cache for all dynamic controllers Crossplane uses a controller engine to dynamically start claim and XR controllers when a new XRD is installed. Before this commit, each controller gets at least one cache. This is because when I built this functionality, you couldn't stop a single informer within a cache (a cache is basically a map of informers by GVK). When realtime composition is enabled, there are even more caches. One per composed resource GVK. A GVK routed cache routes cache lookups to these various delegate caches. Meanwhile, controller-runtime recently made it possible to stop an informer within a cache. It's also been possible to remove an event handler from an informer for some time (since Kubernetes 1.26). https://github.com/kubernetes-sigs/controller-runtime/pull/2285 https://github.com/kubernetes-sigs/controller-runtime/pull/2046 This commit uses a single client, backed by a single cache, across all dynamic controllers (specifically the definition, offered, claim, and XR controllers). Compared to the current implementation, this commit: * Ensures all dynamic controllers use clients backed by the same cache used to power watches (i.e. trigger reconciles). * Takes fewer global locks when realtime compositions are enabled. Locking is now mostly at the controller scope. * Works with the breaking changes to source.Source introduced in controller-runtime v0.18. :) Notably when realtime compositions are enabled, XR controllers will get XRs and composed resources from cache. Before this commit, their client wasn't backed by a cache. They'd get resources directly from the API server. Similarly, the claim controller will read claims from cache. Finally, I think this makes the realtime composition code a little easier to follow by consolidating it into the ControllerEngine, but that's pretty subjective. 
Signed-off-by: Nic Cope --- cmd/crossplane/core/core.go | 88 +- go.mod | 4 +- .../apiextensions/claim/reconciler.go | 13 +- .../apiextensions/claim/reconciler_test.go | 548 ++++++------ .../apiextensions/composite/reconciler.go | 143 +-- .../composite/reconciler_test.go | 546 ++++-------- .../apiextensions/composite/watch/watch.go | 143 +++ .../apiextensions/controller/options.go | 12 + .../apiextensions/definition/composed.go | 287 ------ .../apiextensions/definition/handlers.go | 113 +++ .../apiextensions/definition/handlers_test.go | 237 +++++ .../apiextensions/definition/indexes.go | 56 +- .../apiextensions/definition/indexes_test.go | 44 - .../apiextensions/definition/reconciler.go | 332 ++++--- .../definition/reconciler_test.go | 835 +++++++----------- .../apiextensions/offered/reconciler.go | 120 ++- .../apiextensions/offered/reconciler_test.go | 628 +++++++------ internal/controller/engine/cache.go | 336 +++---- internal/controller/engine/engine.go | 486 ++++++---- internal/controller/engine/engine_test.go | 179 +--- internal/controller/engine/source.go | 189 ++++ 21 files changed, 2542 insertions(+), 2797 deletions(-) create mode 100644 internal/controller/apiextensions/composite/watch/watch.go delete mode 100644 internal/controller/apiextensions/definition/composed.go create mode 100644 internal/controller/apiextensions/definition/handlers.go create mode 100644 internal/controller/apiextensions/definition/handlers_test.go create mode 100644 internal/controller/engine/source.go diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index d82dd552220..5228b343e97 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -21,6 +21,7 @@ import ( "context" "crypto/tls" "fmt" + "io" "os" "path/filepath" "time" @@ -30,6 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + kcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection/resourcelock" 
"k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -47,6 +49,7 @@ import ( "github.com/crossplane/crossplane/internal/controller/apiextensions" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" + "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/controller/pkg" pkgcontroller "github.com/crossplane/crossplane/internal/controller/pkg/controller" "github.com/crossplane/crossplane/internal/features" @@ -134,6 +137,8 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli Deduplicate: true, }) + // The claim and XR controllers don't use the manager's cache or client. + // They use their own. They're setup later in this method. eb := record.NewBroadcaster() mgr, err := ctrl.NewManager(ratelimiter.LimitRESTConfig(cfg, c.MaxReconcileRate), ctrl.Options{ Scheme: s, @@ -270,9 +275,88 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli log.Info("Alpha feature enabled", "flag", features.EnableAlphaClaimSSA) } + // Claim and XR controllers are started and stopped dynamically by the + // ControllerEngine below. When realtime compositions are enabled, they also + // start and stop their watches (e.g. of composed resources) dynamically. To + // do this, the ControllerEngine must have exclusive ownership of a cache. + // This allows it to track what controllers are using the cache's informers. + ca, err := cache.New(mgr.GetConfig(), cache.Options{ + HTTPClient: mgr.GetHTTPClient(), + Scheme: mgr.GetScheme(), + Mapper: mgr.GetRESTMapper(), + SyncPeriod: &c.SyncInterval, + + // When a CRD is deleted, any informers for its GVKs will start trying + // to restart their watches, and fail with scary errors. This should + // only happen when realtime composition is enabled, and we should GC + // the informer within 60 seconds. 
This handler tries to make the error + a little more informative, and less scary. + DefaultWatchErrorHandler: func(_ *kcache.Reflector, err error) { + if errors.Is(err, io.EOF) { + // Watch closed normally. + return + } + log.Debug("Watch error - probably due to CRD being uninstalled", "error", err) + }, + }) + if err != nil { + return errors.Wrap(err, "cannot create cache for API extension controllers") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + // Don't start the cache until the manager is elected. + <-mgr.Elected() + + if err := ca.Start(ctx); err != nil { + log.Info("API extensions cache returned an error", "error", err) + } + + log.Info("API extensions cache stopped") + }() + + cl, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Scheme: mgr.GetScheme(), + Mapper: mgr.GetRESTMapper(), + Cache: &client.CacheOptions{ + Reader: ca, + + // Don't cache secrets - there may be a lot of them. + DisableFor: []client.Object{&corev1.Secret{}}, + + // When this is enabled the client will automatically start a cache + informer whenever an XR controller Gets a new kind of composed + // resource. + // + // We need to stop the informer when it's not needed anymore. At + best an unused informer wastes memory and keeps an unneeded watch + open on the API server. At worst, the CRD the informer listens + for is uninstalled and the informer continuously tries to + restart its watch, logging errors the whole time. + + // When realtime composition is enabled, controllers record what + GVKs they watch. We can garbage collect an informer when no + controller watches its types. When it's not enabled we don't know + when to garbage collect informers. + + // TODO(negz): GC informers when their CRD is deleted. + TODO(negz): GC informers when no XR composes their GVK. 
+ Unstructured: o.Features.Enabled(features.EnableAlphaRealtimeCompositions), + }, + }) + if err != nil { + return errors.Wrap(err, "cannot create client for API extension controllers") + } + + ce := engine.New(mgr, engine.TrackInformers(ca, mgr.GetScheme()), engine.WithLogger(log)) ao := apiextensionscontroller.Options{ - Options: o, - FunctionRunner: functionRunner, + Options: o, + ControllerClient: cl, + ControllerFieldIndexer: ca, + ControllerEngine: ce, + FunctionRunner: functionRunner, } if err := apiextensions.Setup(mgr, ao); err != nil { diff --git a/go.mod b/go.mod index 9d1351d978d..ea1e43e7438 100644 --- a/go.mod +++ b/go.mod @@ -142,7 +142,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -154,7 +154,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 + github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index 061e74661d5..be4cc7d05c7 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -27,7 +27,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -37,7 +36,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" @@ -221,14 +219,6 @@ func defaultCRClaim(c client.Client) crClaim { // A ReconcilerOption configures a Reconciler. type ReconcilerOption func(*Reconciler) -// WithClient specifies how the Reconciler should interact with the Kubernetes -// API. -func WithClient(c client.Client) ReconcilerOption { - return func(r *Reconciler) { - r.client = c - } -} - // WithManagedFieldsUpgrader specifies how the Reconciler should upgrade claim // and composite resource (XR) managed fields from client-side apply to // server-side apply. @@ -300,8 +290,7 @@ func WithPollInterval(after time.Duration) ReconcilerOption { // The returned Reconciler will apply only the ObjectMetaConfigurator by // default; most callers should supply one or more CompositeConfigurators to // configure their composite resources. 
-func NewReconciler(m manager.Manager, of resource.CompositeClaimKind, with resource.CompositeKind, o ...ReconcilerOption) *Reconciler { - c := unstructured.NewClient(m.GetClient()) +func NewReconciler(c client.Client, of resource.CompositeClaimKind, with resource.CompositeKind, o ...ReconcilerOption) *Reconciler { r := &Reconciler{ client: c, gvkClaim: schema.GroupVersionKind(of), diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index b27d52eca15..f8a1ff8992a 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -28,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -36,7 +35,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -47,10 +45,10 @@ func TestReconcile(t *testing.T) { now := metav1.Now() type args struct { - mgr manager.Manager - of resource.CompositeClaimKind - with resource.CompositeKind - opts []ReconcilerOption + client client.Client + of resource.CompositeClaimKind + with resource.CompositeKind + opts []ReconcilerOption } type want struct { r reconcile.Result @@ -65,11 +63,8 @@ func TestReconcile(t *testing.T) { "ClaimNotFound": { reason: "We should not return an error if the composite resource was not found.", args: args{ - 
mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, want: want{ @@ -79,11 +74,8 @@ func TestReconcile(t *testing.T) { "GetClaimError": { reason: "We should return any error we encounter getting the claim.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, want: want{ @@ -94,19 +86,16 @@ func TestReconcile(t *testing.T) { "ReconciliationPaused": { reason: `If a claim has the pause annotation with value "true" we should stop reconciling and not requeue.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - cm.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + cm.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + })), }, }, want: want{ @@ -116,21 +105,20 @@ func TestReconcile(t *testing.T) { "ReconciliationUnpaused": { reason: "If a claim has the ReconcilePaused status condition but no paused annotation, the condition should change to ReconcileSuccess.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - // This claim was paused. - obj.(*claim.Unstructured).SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that our synced status condition changed - // from Paused to ReconcileSuccess. - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(Waiting()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + // This claim was paused. + obj.(*claim.Unstructured).SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that our synced status condition changed + // from Paused to ReconcileSuccess. 
+ cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(Waiting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -147,27 +135,24 @@ func TestReconcile(t *testing.T) { "GetCompositeError": { reason: "The reconcile should fail if we can't get the XR, unless it wasn't found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Return an error getting the XR. - return errBoom - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errGetComposite))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Return an error getting the XR. + return errBoom + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errGetComposite))) + })), }, }, want: want{ @@ -177,29 +162,26 @@ func TestReconcile(t *testing.T) { "CompositeAlreadyBoundError": { reason: "The reconcile should fail if the referenced XR is bound to another claim", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // This XR was created, and references another - // claim. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{Name: "some-other-claim"}) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Errorf(errFmtUnbound, "", "some-other-claim"))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // This XR was created, and references another + // claim. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{Name: "some-other-claim"}) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Errorf(errFmtUnbound, "", "some-other-claim"))) + })), }, }, want: want{ @@ -209,31 +191,30 @@ func TestReconcile(t *testing.T) { "DeleteCompositeError": { reason: "We should not try to delete if the resource is already gone.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists. - o.SetCreationTimestamp(now) - } - return nil - }), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteComposite))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists. + o.SetCreationTimestamp(now) + } + return nil }), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteComposite))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -246,21 +227,20 @@ func TestReconcile(t *testing.T) { "UnpublishConnectionDetailsError": { reason: "The reconcile should fail if we can't unpublish the claim's connection details.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) + })), + }, + opts: []ReconcilerOption{ WithConnectionUnpublisher(ConnectionUnpublisherFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ managed.ConnectionDetails) error { return errBoom })), @@ -273,21 +253,20 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "The reconcile should fail if we can't remove the claim's finalizer.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), @@ -300,21 +279,20 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue if we successfully delete the resource.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileSuccess()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileSuccess()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -327,28 +305,27 @@ func TestReconcile(t *testing.T) { "SuccessfulForegroundDelete": { reason: "We should requeue if we successfully delete the bound composite resource using Foreground deletion", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - // We want to foreground delete. - fg := xpv1.CompositeDeleteForeground - o.SetCompositeDeletePolicy(&fg) - case *composite.Unstructured: - // Pretend the XR exists and is bound. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - } - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + o.SetCompositeDeletePolicy(&fg) + case *composite.Unstructured: + // Pretend the XR exists and is bound. 
+ o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + } + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -361,40 +338,38 @@ func TestReconcile(t *testing.T) { "ForegroundDeleteWaitForCompositeDeletion": { reason: "We should requeue if we successfully deleted the bound composite resource using Foreground deletion and it has not yet been deleted", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - // We want to foreground delete. - fg := xpv1.CompositeDeleteForeground - o.SetCompositeDeletePolicy(&fg) - case *composite.Unstructured: - // Pretend the XR exists and is bound, but is - // being deleted. - o.SetCreationTimestamp(now) - o.SetDeletionTimestamp(&now) - o.SetClaimReference(&claim.Reference{}) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) // We want to foreground delete. fg := xpv1.CompositeDeleteForeground - cm.SetCompositeDeletePolicy(&fg) + o.SetCompositeDeletePolicy(&fg) + case *composite.Unstructured: + // Pretend the XR exists and is bound, but is + // being deleted. 
+ o.SetCreationTimestamp(now) + o.SetDeletionTimestamp(&now) + o.SetClaimReference(&claim.Reference{}) + } + return nil + }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + cm.SetCompositeDeletePolicy(&fg) - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - })), - }, - ), + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -407,15 +382,14 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should fail the reconcile if we can't add the claim's finalizer", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) - })), - }), WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), @@ -428,15 +402,14 @@ func TestReconcile(t *testing.T) { "SyncCompositeError": { reason: "We should fail the reconcile if we can't bind and sync the claim with a composite resource", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSync))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSync))) - })), - }), WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -450,31 +423,30 @@ func TestReconcile(t *testing.T) { "CompositeNotReady": { reason: "We should return early if the bound composite resource is not yet ready", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is bound, but is - // still being created. 
- o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Creating()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(Waiting()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is bound, but is + // still being created. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Creating()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(Waiting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -488,29 +460,28 @@ func TestReconcile(t *testing.T) { "PropagateConnectionError": { reason: "We should fail the reconcile if we can't propagate the bound XR's connection details", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is available. 
- o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Available()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPropagateCDs))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPropagateCDs))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -527,31 +498,30 @@ func TestReconcile(t *testing.T) { "SuccessfulReconcile": { reason: "We should not requeue if we successfully synced the composite resource and propagated its connection details", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is available. 
- o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Available()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConnectionDetailsLastPublishedTime(&now) - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(xpv1.Available()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConnectionDetailsLastPublishedTime(&now) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(xpv1.Available()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), @@ -569,7 +539,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, tc.args.of, tc.args.with, tc.args.opts...) + r := NewReconciler(tc.args.client, tc.args.of, tc.args.with, tc.args.opts...) 
got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index e2d634a5577..e9581c7a796 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -26,13 +26,9 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -42,10 +38,11 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/controller/engine" ) const ( @@ -255,14 +252,6 @@ func WithPollInterval(interval time.Duration) ReconcilerOption { }) } -// WithClient specifies how the Reconciler should interact with the Kubernetes -// API. -func WithClient(c client.Client) ReconcilerOption { - return func(r *Reconciler) { - r.client = c - } -} - // WithCompositionRevisionFetcher specifies how the composition to be used should be // fetched. 
func WithCompositionRevisionFetcher(f CompositionRevisionFetcher) ReconcilerOption { @@ -336,11 +325,13 @@ func WithComposer(c Composer) ReconcilerOption { } } -// WithKindObserver specifies how the Reconciler should observe kinds for -// realtime events. -func WithKindObserver(o KindObserver) ReconcilerOption { +// WithWatchStarter specifies how the Reconciler should start watches for any +// resources it composes. +func WithWatchStarter(controllerName string, h handler.EventHandler, w WatchStarter) ReconcilerOption { return func(r *Reconciler) { - r.kindObserver = o + r.controllerName = controllerName + r.watchHandler = h + r.engine = w } } @@ -363,6 +354,19 @@ func (fn CompositionRevisionValidatorFn) Validate(c *v1.CompositionRevision) err return fn(c) } +// A WatchStarter can start a new watch. XR controllers use this to dynamically +// start watches when they compose new kinds of resources. +type WatchStarter interface { + // StartWatches starts the supplied watches, if they're not running already. + StartWatches(name string, ws ...engine.Watch) error +} + +// A NopWatchStarter does nothing. +type NopWatchStarter struct{} + +// StartWatches does nothing. +func (n *NopWatchStarter) StartWatches(_ string, _ ...engine.Watch) error { return nil } + type environment struct { EnvironmentFetcher } @@ -375,34 +379,15 @@ type compositeResource struct { managed.ConnectionPublisher } -// KindObserver tracks kinds of referenced composed resources in composite -// resources in order to start watches for them for realtime events. -type KindObserver interface { - // WatchComposedResources starts a watch of the given kinds to trigger reconciles when - // a referenced object of those kinds changes. - WatchComposedResources(kind ...schema.GroupVersionKind) -} - -// KindObserverFunc implements KindObserver as a function. 
-type KindObserverFunc func(kind ...schema.GroupVersionKind) - -// WatchComposedResources starts a watch of the given kinds to trigger reconciles when -// a referenced object of those kinds changes. -func (fn KindObserverFunc) WatchComposedResources(kind ...schema.GroupVersionKind) { - fn(kind...) -} - // NewReconciler returns a new Reconciler of composite resources. -func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - +func NewReconciler(c client.Client, of resource.CompositeKind, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - client: kube, + client: c, gvk: schema.GroupVersionKind(of), revision: revision{ - CompositionRevisionFetcher: NewAPIRevisionFetcher(resource.ClientApplicator{Client: kube, Applicator: resource.NewAPIPatchingApplicator(kube)}), + CompositionRevisionFetcher: NewAPIRevisionFetcher(resource.ClientApplicator{Client: c, Applicator: resource.NewAPIPatchingApplicator(c)}), CompositionRevisionValidator: CompositionRevisionValidatorFn(func(rev *v1.CompositionRevision) error { // TODO(negz): Presumably this validation will eventually be // removed in favor of the new Composition validation @@ -421,18 +406,21 @@ func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...Recon }, composite: compositeResource{ - Finalizer: resource.NewAPIFinalizer(kube, finalizer), - CompositionSelector: NewAPILabelSelectorResolver(kube), + Finalizer: resource.NewAPIFinalizer(c, finalizer), + CompositionSelector: NewAPILabelSelectorResolver(c), EnvironmentSelector: NewNoopEnvironmentSelector(), - Configurator: NewConfiguratorChain(NewAPINamingConfigurator(kube), NewAPIConfigurator(kube)), + Configurator: NewConfiguratorChain(NewAPINamingConfigurator(c), NewAPIConfigurator(c)), // TODO(negz): In practice this is a filtered publisher that will // never filter any keys. Is there an unfiltered variant we could // use by default instead? 
- ConnectionPublisher: NewAPIFilteredSecretPublisher(kube, []string{}), + ConnectionPublisher: NewAPIFilteredSecretPublisher(c, []string{}), }, - resource: NewPTComposer(kube), + resource: NewPTComposer(c), + + // Dynamic watches are disabled by default. + engine: &NopWatchStarter{}, log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -458,8 +446,12 @@ type Reconciler struct { revision revision composite compositeResource - resource Composer - kindObserver KindObserver + resource Composer + + // Used to dynamically start composed resource watches. + controllerName string + engine WatchStarter + watchHandler handler.EventHandler log logging.Logger record event.Recorder @@ -624,12 +616,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } - if r.kindObserver != nil { - var gvks []schema.GroupVersionKind - for _, ref := range xr.GetResourceReferences() { - gvks = append(gvks, ref.GroupVersionKind()) - } - r.kindObserver.WatchComposedResources(gvks...) + ws := make([]engine.Watch, len(xr.GetResourceReferences())) + for i, ref := range xr.GetResourceReferences() { + ws[i] = engine.WatchFor(composed.New(composed.FromReference(ref)), r.watchHandler) + } + if err := r.engine.StartWatches(r.controllerName, ws...); err != nil { + // TODO(negz): If we stop polling this will be a more serious error. + log.Debug("Cannot start watches for composed resources. Relying on polling to know when they change.", "controller-name", r.controllerName) } published, err := r.composite.PublishConnection(ctx, xr, res.ConnectionDetails) @@ -731,47 +724,3 @@ func getComposerResourcesNames(cds []ComposedResource) []string { } return names } - -// EnqueueForCompositionRevisionFunc returns a function that enqueues (the -// related) XRs when a new CompositionRevision is created. 
This speeds up -// reconciliation of XRs on changes to the Composition by not having to wait for -// the 60s sync period, but be instant. -func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { - return func(ctx context.Context, createEvent runtimeevent.TypedCreateEvent[*v1.CompositionRevision], q workqueue.RateLimitingInterface) { - rev := createEvent.Object - - // get all XRs - xrs := kunstructured.UnstructuredList{} - xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) - xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") - if err := list(ctx, &xrs); err != nil { - // logging is most we can do here. This is a programming error if it happens. - log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) - return - } - - // enqueue all those that reference the Composition of this revision - compName := rev.Labels[v1.LabelCompositionName] - if compName == "" { - return - } - for _, u := range xrs.Items { - xr := composite.Unstructured{Unstructured: u} - - // only automatic - if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { - continue - } - - // only those that reference the right Composition - if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { - continue - } - - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: xr.GetName(), - Namespace: xr.GetNamespace(), - }}) - } - } -} diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index fbefe27eb14..4ca72dd50cf 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ 
b/internal/controller/apiextensions/composite/reconciler_test.go @@ -18,7 +18,6 @@ package composite import ( "context" - "reflect" "testing" "time" @@ -27,23 +26,16 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -57,9 +49,9 @@ func TestReconcile(t *testing.T) { cd := managed.ConnectionDetails{"a": []byte("b")} type args struct { - mgr manager.Manager - of resource.CompositeKind - opts []ReconcilerOption + client client.Client + of resource.CompositeKind + opts []ReconcilerOption } type want struct { r reconcile.Result @@ -76,11 +68,8 @@ func TestReconcile(t *testing.T) { "CompositeResourceNotFound": { reason: "We should not return an error if the composite resource was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }), + client: &test.MockClient{ + MockGet: 
test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, want: want{ @@ -90,11 +79,8 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceError": { reason: "We should return error encountered while getting the composite resource.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, want: want{ @@ -104,17 +90,16 @@ func TestReconcile(t *testing.T) { "UnpublishConnectionError": { reason: "We should return any error encountered while unpublishing connection details.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(want resource.Composite) { + want.SetDeletionTimestamp(&now) + want.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errUnpublish))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(want resource.Composite) { - want.SetDeletionTimestamp(&now) - want.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errUnpublish))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithConnectionPublishers(managed.ConnectionPublisherFns{ UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { @@ -130,17 +115,16 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "We should return any error encountered while removing finalizer.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + 
})), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom @@ -160,17 +144,16 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should return no error when deleted successfully.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileSuccess()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileSuccess()) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil @@ -190,14 +173,13 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error encountered while adding finalizer.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + 
MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom @@ -212,14 +194,13 @@ func TestReconcile(t *testing.T) { "SelectCompositionError": { reason: "We should return any error encountered while selecting a composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSelectComp))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSelectComp))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return errBoom @@ -233,15 +214,14 @@ func TestReconcile(t *testing.T) { "FetchCompositionError": { reason: "We should return any error encountered while fetching a composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errFetchComp))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: 
test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errFetchComp))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -259,15 +239,14 @@ func TestReconcile(t *testing.T) { "ValidateCompositionError": { reason: "We should return any error encountered while validating our Composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errValidate))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errValidate))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -288,15 +267,14 @@ func TestReconcile(t *testing.T) { "ConfigureCompositeError": { reason: "We should return any error encountered while configuring the composite resource.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errConfigure))) + })), + }, opts: []ReconcilerOption{ 
- WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errConfigure))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -318,12 +296,11 @@ func TestReconcile(t *testing.T) { "SelectEnvironmentError": { reason: "We should return any error encountered while selecting the environment.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil @@ -346,12 +323,11 @@ func TestReconcile(t *testing.T) { "FetchEnvironmentError": { reason: "We should requeue on any error encountered while fetching the environment.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil @@ -374,15 +350,14 @@ func TestReconcile(t *testing.T) { "ComposeResourcesError": { reason: "We should return any error encountered while composing resources.", args: args{ - mgr: &fake.Manager{}, + 
client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errCompose))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errCompose))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -407,15 +382,14 @@ func TestReconcile(t *testing.T) { "PublishConnectionDetailsError": { reason: "We should return any error encountered while publishing connection details.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPublish))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPublish))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -445,15 +419,14 @@ func TestReconcile(t *testing.T) { "CompositionWarnings": { reason: "We should not requeue if our Composer returned warning events.", args: args{ - 
mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(xr resource.Composite) { + xr.SetCompositionReference(&corev1.ObjectReference{}) + xr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(xr resource.Composite) { - xr.SetCompositionReference(&corev1.ObjectReference{}) - xr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -488,15 +461,14 @@ func TestReconcile(t *testing.T) { "ComposedResourcesNotReady": { reason: "We should requeue if any of our composed resources are not yet ready.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Creating().WithMessage("Unready resources: cat, cow, elephant, and 1 more")) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Creating().WithMessage("Unready resources: cat, cow, elephant, and 1 more")) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -555,16 +527,15 @@ func TestReconcile(t *testing.T) { 
"ComposedResourcesReady": { reason: "We should requeue after our poll interval if all of our composed resources are ready.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -601,17 +572,14 @@ func TestReconcile(t *testing.T) { "ReconciliationPausedSuccessful": { reason: `If a composite resource has the pause annotation with value "true", there should be no further requeue requests.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - cr.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - })), - }), + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + })), + MockStatusUpdate: 
WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + cr.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + })), }, }, want: want{ @@ -621,14 +589,11 @@ func TestReconcile(t *testing.T) { "ReconciliationPausedError": { reason: `If a composite resource has the pause annotation with value "true" and the status update due to reconciliation being paused fails, error should be reported causing an exponentially backed-off requeue.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - })), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }), + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + })), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), }, }, want: want{ @@ -638,20 +603,19 @@ func TestReconcile(t *testing.T) { "ReconciliationResumes": { reason: `If a composite resource has the pause annotation with some value other than "true" and the Synced=False/ReconcilePaused status condition, reconciliation should resume with requeueing.`, args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) + cr.SetConditions(xpv1.ReconcilePaused()) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + 
cr.SetCompositionReference(&corev1.ObjectReference{}) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) - cr.SetConditions(xpv1.ReconcilePaused()) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - cr.SetCompositionReference(&corev1.ObjectReference{}) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -684,20 +648,19 @@ func TestReconcile(t *testing.T) { "ReconciliationResumesAfterAnnotationRemoval": { reason: `If a composite resource has the pause annotation removed and the Synced=False/ReconcilePaused status condition, reconciliation should resume with requeueing.`, args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + // no annotation atm + // (but reconciliations were already paused) + cr.SetConditions(xpv1.ReconcilePaused()) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + cr.SetCompositionReference(&corev1.ObjectReference{}) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - // no annotation atm - // (but reconciliations were already paused) - cr.SetConditions(xpv1.ReconcilePaused()) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - 
cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - cr.SetCompositionReference(&corev1.ObjectReference{}) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -731,7 +694,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, tc.args.of, tc.args.opts...) + r := NewReconciler(tc.args.client, tc.args.of, tc.args.opts...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { @@ -843,214 +806,3 @@ func TestFilterToXRPatches(t *testing.T) { }) } } - -func TestEnqueueForCompositionRevisionFunc(t *testing.T) { - type args struct { - of schema.GroupVersionKind - list func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error - event runtimeevent.TypedCreateEvent[*v1.CompositionRevision] - } - type want struct { - added []interface{} - } - - dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} - dogList := dog.GroupVersion().WithKind("DogList") - - tests := []struct { - name string - args args - want want - }{ - { - name: "empty", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { - // test parameters only here, not in the later tests for brevity. 
- u, ok := list.(*kunstructured.UnstructuredList) - if !ok { - t.Errorf("list was not an UnstructuredList") - } else if got := u.GroupVersionKind(); got != dogList { - t.Errorf("list was not a DogList, got: %s", got) - } - if len(opts) != 0 { - t.Errorf("unexpected list options: %#v", opts) - } - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - }, - { - name: "automatic management policy", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{ - added: []interface{}{reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: "ns", - Name: "obj1", - }}}, - }, - }, - { - name: "manual management policy", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateManual - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: 
runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{}, - }, - { - name: "other composition", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{}, - }, - { - name: "multiple", - args: args{ - of: dog, - list: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - automatic := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&automatic) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - obj2 := obj1.DeepCopy() - obj2.SetName("obj2") - - obj3 := obj1.DeepCopy() - obj3.SetName("obj3") - obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - obj4 := obj1.DeepCopy() - obj4.SetName("obj4") - manual := xpv1.UpdateManual - obj4.SetCompositionUpdatePolicy(&manual) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ - obj1.Unstructured, - obj2.Unstructured, - obj3.Unstructured, - } - - return nil - }, - event: runtimeevent.TypedCreateEvent[*v1.CompositionRevision]{ - Object: &v1.CompositionRevision{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{ - added: []interface{}{ - reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj1"}}, - reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj2"}}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fn := EnqueueForCompositionRevisionFunc(resource.CompositeKind(tt.args.of), tt.args.list, logging.NewNopLogger()) - q := rateLimitingQueueMock{} - fn(context.TODO(), tt.args.event, &q) - if got := q.added; !reflect.DeepEqual(got, tt.want.added) { - t.Errorf("EnqueueForCompositionRevisionFunc(...)(ctx, event, queue) = %v, want %v", got, tt.want) - } - }) - } -} - -type rateLimitingQueueMock struct { - workqueue.RateLimitingInterface - added []interface{} -} - -func (f *rateLimitingQueueMock) Add(item interface{}) { - f.added = append(f.added, item) -} diff --git a/internal/controller/apiextensions/composite/watch/watch.go b/internal/controller/apiextensions/composite/watch/watch.go new file mode 100644 index 00000000000..16272b9c71f --- /dev/null +++ b/internal/controller/apiextensions/composite/watch/watch.go @@ -0,0 +1,143 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package watch implements a garbage collector for composed resource watches. 
+package watch + +import ( + "context" + "time" + + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" +) + +// A ControllerEngine can stop watches for a controller, and remove informers. +type ControllerEngine interface { + StopWatches(ctx context.Context, name string, keep ...schema.GroupVersionKind) error + RemoveUnwatchedInformers(ctx context.Context) error +} + +// A GarbageCollector garbage collects watches for a composite resource +// controller. +type GarbageCollector struct { + controllerName string + xrGVK schema.GroupVersionKind + + c client.Reader + ce ControllerEngine + + alwaysKeep []schema.GroupVersionKind + + log logging.Logger +} + +// A GarbageCollectorOption configures a GarbageCollector. +type GarbageCollectorOption func(gc *GarbageCollector) + +// WithLogger configures how a GarbageCollector should log. +func WithLogger(l logging.Logger) GarbageCollectorOption { + return func(gc *GarbageCollector) { + gc.log = l + } +} + +// AlwaysKeep specifies a set of GVKs that the garbage collector should always +// keep - i.e. should never garbage collect. Typically this will be the XR's +// GVK, and that of a CompositionRevision. +func AlwaysKeep(gvks ...schema.GroupVersionKind) GarbageCollectorOption { + return func(gc *GarbageCollector) { + gc.alwaysKeep = gvks + } +} + +// NewGarbageCollector creates a new watch garbage collector for a controller. 
+func NewGarbageCollector(name string, of resource.CompositeKind, c client.Reader, ce ControllerEngine, o ...GarbageCollectorOption) *GarbageCollector { + gc := &GarbageCollector{ + controllerName: name, + xrGVK: schema.GroupVersionKind(of), + c: c, + ce: ce, + log: logging.NewNopLogger(), + } + for _, fn := range o { + fn(gc) + } + return gc +} + +// GarbageCollectWatches runs garbage collection at the specified interval, +// until the supplied context is cancelled. It stops any watches for resource +// types that are no longer composed by any composite resource (XR). It also +// removes any informers that no longer power any watches. +func (gc *GarbageCollector) GarbageCollectWatches(ctx context.Context, interval time.Duration) { + t := time.NewTicker(interval) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + gc.log.Debug("Stopping composite controller watch garbage collector", "error", ctx.Err()) + return + case <-t.C: + if err := gc.GarbageCollectWatchesNow(ctx); err != nil { + gc.log.Info("Cannot garbage collect composite controller watches", "error", err) + } + } + } +} + +// GarbageCollectWatchesNow stops any watches for resource types that are no +// longer composed by any composite resource (XR). It also removes any informers +// that no longer power any watches. +func (gc *GarbageCollector) GarbageCollectWatchesNow(ctx context.Context) error { + // List all XRs of the type we're interested in. + l := &kunstructured.UnstructuredList{} + l.SetAPIVersion(gc.xrGVK.GroupVersion().String()) + l.SetKind(gc.xrGVK.Kind + "List") + if err := gc.c.List(ctx, l); err != nil { + return errors.Wrap(err, "cannot list composite resources") + } + + // Build the set of GVKs they still reference. 
+ gvks := make(map[schema.GroupVersionKind]bool) + for _, u := range l.Items { + xr := &composite.Unstructured{Unstructured: u} + for _, ref := range xr.GetResourceReferences() { + gvks[schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind)] = true + } + } + + keep := make([]schema.GroupVersionKind, 0, len(gc.alwaysKeep)+len(gvks)) + keep = append(keep, gc.alwaysKeep...) + for gvk := range gvks { + keep = append(keep, gvk) + } + + // Stop the controller watching any other types of resource. + if err := gc.ce.StopWatches(ctx, gc.controllerName, keep...); err != nil { + return errors.Wrap(err, "cannot stop watches for composed resource types that are no longer referenced by any composite resource") + } + + // Remove any informers that are no longer used by any controller. + return errors.Wrap(gc.ce.RemoveUnwatchedInformers(ctx), "cannot remove informers that are no longer backing a watch by any controller") +} diff --git a/internal/controller/apiextensions/controller/options.go b/internal/controller/apiextensions/controller/options.go index 57c42c72392..a4180a79422 100644 --- a/internal/controller/apiextensions/controller/options.go +++ b/internal/controller/apiextensions/controller/options.go @@ -18,8 +18,11 @@ limitations under the License. package controller import ( + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/xfn" ) @@ -27,6 +30,15 @@ import ( type Options struct { controller.Options + // ControllerClient used by all dynamic controllers. + ControllerClient client.Client + + // ControllerFieldIndexer used to add indexes to the ControllerClient. + ControllerFieldIndexer client.FieldIndexer + + // ControllerEngine used to dynamically start and stop controllers. + ControllerEngine *engine.ControllerEngine + // FunctionRunner used to run Composition Functions. 
FunctionRunner *xfn.PackagedFunctionRunner } diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go deleted file mode 100644 index bb87b88b5f1..00000000000 --- a/internal/controller/apiextensions/definition/composed.go +++ /dev/null @@ -1,287 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package definition - -import ( - "context" - "strings" - "sync" - - "github.com/google/uuid" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - kcache "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - cache "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/crossplane/crossplane-runtime/pkg/logging" - - "github.com/crossplane/crossplane/internal/controller/engine" - "github.com/crossplane/crossplane/internal/xcrd" -) - -// composedResourceInformers manages composed resource informers referenced by -// composite resources. 
It serves as an event source for realtime notifications -// of changed composed resources, with the composite reconcilers as sinks. -// It keeps composed resource informers alive as long as there are composites -// referencing them. In parallel, the composite reconcilers keep track of -// references to composed resources, and inform composedResourceInformers about -// them via the WatchComposedResources method. -type composedResourceInformers struct { - log logging.Logger - cluster cluster.Cluster - - handler handler.EventHandler - ps []predicate.Predicate - - gvkRoutedCache *engine.GVKRoutedCache - - lock sync.RWMutex // everything below is protected by this lock - - // cdCaches holds the composed resource informers. These are dynamically - // started and stopped based on the composites that reference them. - cdCaches map[schema.GroupVersionKind]cdCache - // xrCaches holds the composite resource informers. We use them to find - // composites referencing a certain composed resource GVK. If no composite - // is left doing so, a composed resource informer is stopped. - xrCaches map[schema.GroupVersionKind]cache.Cache - sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid -} - -type cdCache struct { - cache cache.Cache - cancelFn context.CancelFunc -} - -var _ source.Source = &composedResourceInformers{} - -// Start implements source.Source, i.e. starting composedResourceInformers as -// source with h as the sink of update events. It keeps sending events until -// ctx is done. -// Note that Start can be called multiple times to deliver events to multiple -// (composite resource) controllers. 
-func (i *composedResourceInformers) Start(ctx context.Context, q workqueue.RateLimitingInterface) error { - id := uuid.New().String() - - i.lock.Lock() - defer i.lock.Unlock() - i.sinks[id] = func(ev runtimeevent.UpdateEvent) { - for _, p := range i.ps { - if !p.Update(ev) { - return - } - } - i.handler.Update(ctx, ev, q) - } - - go func() { - <-ctx.Done() - - i.lock.Lock() - defer i.lock.Unlock() - delete(i.sinks, id) - }() - - return nil -} - -// RegisterComposite registers a composite resource cache with its GVK. -// Instances of this GVK will be considered to keep composed resource informers -// alive. -func (i *composedResourceInformers) RegisterComposite(gvk schema.GroupVersionKind, ca cache.Cache) { - i.lock.Lock() - defer i.lock.Unlock() - - if i.xrCaches == nil { - i.xrCaches = make(map[schema.GroupVersionKind]cache.Cache) - } - i.xrCaches[gvk] = ca -} - -// UnregisterComposite removes a composite resource cache from being considered -// to keep composed resource informers alive. -func (i *composedResourceInformers) UnregisterComposite(gvk schema.GroupVersionKind) { - i.lock.Lock() - defer i.lock.Unlock() - delete(i.xrCaches, gvk) -} - -// WatchComposedResources starts informers for the given composed resource GVKs. -// The is wired into the composite reconciler, which will call this method on -// every reconcile to make composedResourceInformers aware of the composed -// resources the given composite resource references. -// -// Note that this complements cleanupComposedResourceInformers which regularly -// garbage collects composed resource informers that are no longer referenced by -// any composite. 
-func (i *composedResourceInformers) WatchComposedResources(gvks ...schema.GroupVersionKind) { - i.lock.RLock() - defer i.lock.RUnlock() - - // start new informers - for _, gvk := range gvks { - if _, found := i.cdCaches[gvk]; found { - continue - } - - log := i.log.WithValues("gvk", gvk.String()) - - ca, err := cache.New(i.cluster.GetConfig(), cache.Options{}) - if err != nil { - log.Debug("failed creating a cache", "error", err) - continue - } - - // don't forget to call cancelFn in error cases to avoid leaks. In the - // happy case it's called from the go routine starting the cache below. - ctx, cancelFn := context.WithCancel(context.Background()) - - u := kunstructured.Unstructured{} - u.SetGroupVersionKind(gvk) - inf, err := ca.GetInformer(ctx, &u, cache.BlockUntilSynced(false)) // don't block. We wait in the go routine below. - if err != nil { - cancelFn() - log.Debug("failed getting informer", "error", err) - continue - } - - if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ - UpdateFunc: func(oldObj, newObj interface{}) { - old := oldObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. - obj := newObj.(client.Object) //nolint:forcetypeassert // Will always be client.Object. - if old.GetResourceVersion() == obj.GetResourceVersion() { - return - } - - i.lock.RLock() - defer i.lock.RUnlock() - - ev := runtimeevent.UpdateEvent{ - ObjectOld: old, - ObjectNew: obj, - } - for _, handleFn := range i.sinks { - handleFn(ev) - } - }, - }); err != nil { - cancelFn() - log.Debug("failed adding event handler", "error", err) - continue - } - - go func() { - defer cancelFn() - - log.Info("Starting composed resource watch") - _ = ca.Start(ctx) - }() - - // TODO(negz): We should take a write lock before writing to this map. 
- i.cdCaches[gvk] = cdCache{ - cache: ca, - cancelFn: cancelFn, - } - - // wait for in the background, and only when synced add to the routed cache - go func(gvk schema.GroupVersionKind) { - if synced := ca.WaitForCacheSync(ctx); synced { - log.Debug("Composed resource cache synced") - i.gvkRoutedCache.AddDelegate(gvk, ca) - } - }(gvk) - } -} - -// cleanupComposedResourceInformers garbage collects composed resource informers -// that are no longer referenced by any composite resource. -// -// Note that this complements WatchComposedResources which starts informers for -// the composed resources referenced by a composite resource. -func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context.Context) { - crds := extv1.CustomResourceDefinitionList{} - if err := i.cluster.GetClient().List(ctx, &crds); err != nil { - i.log.Debug(errListCRDs, "error", err) - return - } - - // copy map to avoid locking it for the entire duration of the loop - xrCaches := make(map[schema.GroupVersionKind]cache.Cache, len(i.xrCaches)) - i.lock.RLock() - for gvk, ca := range i.xrCaches { - xrCaches[gvk] = ca - } - i.lock.RUnlock() - - // find all CRDs that some XR is referencing. This is O(CRDs * XRDs * versions). - // In practice, CRDs are 1000ish max, and XRDs are 10ish. So this is - // fast enough for now. It's all in-memory. 
- referenced := make(map[schema.GroupVersionKind]bool) - for _, crd := range crds.Items { - if !xcrd.IsEstablished(crd.Status) { - continue - } - - for _, v := range crd.Spec.Versions { - cdGVK := schema.GroupVersionKind{Group: crd.Spec.Group, Version: v.Name, Kind: crd.Spec.Names.Kind} - for xrGVK, xrCache := range xrCaches { - // list composites that reference this composed GVK - list := kunstructured.UnstructuredList{} - list.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) - if err := xrCache.List(ctx, &list, client.MatchingFields{compositeResourceRefGVKsIndex: cdGVK.String()}); err != nil { - i.log.Debug("cannot list composite resources referencing a certain composed resource GVK", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourceRefGVKsIndex+"="+cdGVK.String()) - continue - } - if len(list.Items) > 0 { - referenced[cdGVK] = true - } - } - } - } - - // stop old informers - for gvk, inf := range i.cdCaches { - if referenced[gvk] { - continue - } - inf.cancelFn() - i.gvkRoutedCache.RemoveDelegate(gvk) - i.log.Info("Stopped composed resource watch", "gvk", gvk.String()) - - // TODO(negz): We should take a write lock before writing to this map. - delete(i.cdCaches, gvk) - } -} - -func parseAPIVersion(v string) (string, string) { - parts := strings.SplitN(v, "/", 2) - switch len(parts) { - case 1: - return "", parts[0] - case 2: - return parts[0], parts[1] - default: - return "", "" - } -} diff --git a/internal/controller/apiextensions/definition/handlers.go b/internal/controller/apiextensions/definition/handlers.go new file mode 100644 index 00000000000..2b7b1b6030d --- /dev/null +++ b/internal/controller/apiextensions/definition/handlers.go @@ -0,0 +1,113 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package definition + +import ( + "context" + + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + kevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +// EnqueueForCompositionRevision enqueues reconciles for all XRs that will use a +// newly created CompositionRevision. +func EnqueueForCompositionRevision(of resource.CompositeKind, c client.Reader, log logging.Logger) handler.Funcs { + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e kevent.CreateEvent, q workqueue.RateLimitingInterface) { + rev, ok := e.Object.(*v1.CompositionRevision) + if !ok { + // should not happen + return + } + + // TODO(negz): Check whether the revision's compositeTypeRef matches + // the supplied CompositeKind. If it doesn't, we can return early. 
+ + // get all XRs + xrs := kunstructured.UnstructuredList{} + xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) + xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") + if err := c.List(ctx, &xrs); err != nil { + // logging is the most we can do here. This is a programming error if it happens. + log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) + return + } + + // enqueue all those that reference the Composition of this revision + compName := rev.Labels[v1.LabelCompositionName] + if compName == "" { + return + } + for _, u := range xrs.Items { + xr := composite.Unstructured{Unstructured: u} + + // only automatic + if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { + continue + } + + // only those that reference the right Composition + if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { + continue + } + + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: xr.GetName(), + Namespace: xr.GetNamespace(), + }}) + } + }, + } +} + +// EnqueueCompositeResources enqueues reconciles for all XRs that reference an +// updated composed resource. 
+func EnqueueCompositeResources(of resource.CompositeKind, c client.Reader, log logging.Logger) handler.Funcs { + return handler.Funcs{ + UpdateFunc: func(ctx context.Context, ev kevent.UpdateEvent, q workqueue.RateLimitingInterface) { + xrGVK := schema.GroupVersionKind(of) + cdGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() + key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), cdGVK.Kind, cdGVK.GroupVersion().String()) + + composites := kunstructured.UnstructuredList{} + composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) + if err := c.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { + log.Debug("cannot list composite resources related to a composed resource change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) + return + } + + // queue those composites for reconciliation + for _, xr := range composites.Items { + log.Debug("Enqueueing composite resource because composed resource changed", "name", xr.GetName(), "cdGVK", cdGVK.String(), "cdName", ev.ObjectNew.GetName()) + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) + } + }, + } +} diff --git a/internal/controller/apiextensions/definition/handlers_test.go b/internal/controller/apiextensions/definition/handlers_test.go new file mode 100644 index 00000000000..9992c0931d3 --- /dev/null +++ b/internal/controller/apiextensions/definition/handlers_test.go @@ -0,0 +1,237 @@ +package definition + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + kevent "sigs.k8s.io/controller-runtime/pkg/event" + 
"sigs.k8s.io/controller-runtime/pkg/reconcile" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + "github.com/crossplane/crossplane-runtime/pkg/test" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +func TestEnqueueForCompositionRevisionFunc(t *testing.T) { + type args struct { + of schema.GroupVersionKind + reader client.Reader + event kevent.CreateEvent + } + type want struct { + added []interface{} + } + + dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} + dogList := dog.GroupVersion().WithKind("DogList") + + tests := map[string]struct { + reason string + args args + want want + }{ + "NoXRs": { + reason: "If there are no XRs of the specified type, no reconciles should be enqueued.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { + // test parameters only here, not in the later tests for brevity. 
+ u, ok := list.(*kunstructured.UnstructuredList) + if !ok { + t.Errorf("list was not an UnstructuredList") + } else if got := u.GroupVersionKind(); got != dogList { + t.Errorf("list was not a DogList, got: %s", got) + } + if len(opts) != 0 { + t.Errorf("unexpected list options: %#v", opts) + } + return nil + }, + }, + }, + }, + "AutomaticManagementPolicy": { + reason: "A reconcile should be enqueued for XRs with an automatic revision update policy.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{ + added: []interface{}{reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: "ns", + Name: "obj1", + }}}, + }, + }, + "ManualManagementPolicy": { + reason: "A reconcile shouldn't be enqueued for XRs with a manual revision update policy.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateManual + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: 
&v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{}, + }, + "OtherComposition": { + reason: "A reconcile shouldn't be enqueued for an XR that references a different Composition", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{}, + }, + "Multiple": { + reason: "Reconciles should be enqueued only for the XRs that reference the relevant Composition, and have an automatic composition revision update policy.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + automatic := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&automatic) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + obj2 := obj1.DeepCopy() + obj2.SetName("obj2") + + obj3 := obj1.DeepCopy() + obj3.SetName("obj3") + obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + obj4 := obj1.DeepCopy() + obj4.SetName("obj4") + manual := xpv1.UpdateManual + obj4.SetCompositionUpdatePolicy(&manual) + + list.(*kunstructured.UnstructuredList).Items = 
[]kunstructured.Unstructured{ + obj1.Unstructured, + obj2.Unstructured, + obj3.Unstructured, + } + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{ + added: []interface{}{ + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj1"}}, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj2"}}, + }, + }, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + fns := EnqueueForCompositionRevision(resource.CompositeKind(tc.args.of), tc.args.reader, logging.NewNopLogger()) + q := rateLimitingQueueMock{} + fns.Create(context.TODO(), tc.args.event, &q) + + if diff := cmp.Diff(tc.want.added, q.added); diff != "" { + t.Errorf("\n%s\nfns.Create(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} + +type rateLimitingQueueMock struct { + workqueue.RateLimitingInterface + added []interface{} +} + +func (f *rateLimitingQueueMock) Add(item interface{}) { + f.added = append(f.added, item) +} diff --git a/internal/controller/apiextensions/definition/indexes.go b/internal/controller/apiextensions/definition/indexes.go index 6aaca2cd81c..bcc34519b36 100644 --- a/internal/controller/apiextensions/definition/indexes.go +++ b/internal/controller/apiextensions/definition/indexes.go @@ -17,46 +17,29 @@ limitations under the License. package definition import ( + "context" "fmt" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" ) const ( - // compositeResourceRefGVKsIndex is an index of all GroupKinds that - // are in use by a Composition. 
It indexes from spec.resourceRefs, not - // from spec.resources. Hence, it will also work with functions. - compositeResourceRefGVKsIndex = "compositeResourceRefsGVKs" // compositeResourcesRefsIndex is an index of resourceRefs that are owned // by a composite. compositeResourcesRefsIndex = "compositeResourcesRefs" ) -var ( - _ client.IndexerFunc = IndexCompositeResourceRefGVKs - _ client.IndexerFunc = IndexCompositeResourcesRefs -) +var _ client.IndexerFunc = IndexCompositeResourcesRefs -// IndexCompositeResourceRefGVKs assumes the passed object is a composite. It -// returns gvk keys for every resource referenced in the composite. -func IndexCompositeResourceRefGVKs(o client.Object) []string { - u, ok := o.(*kunstructured.Unstructured) - if !ok { - return nil // should never happen - } - xr := composite.Unstructured{Unstructured: *u} - refs := xr.GetResourceReferences() - keys := make([]string, 0, len(refs)) - for _, ref := range refs { - group, version := parseAPIVersion(ref.APIVersion) - keys = append(keys, schema.GroupVersionKind{Group: group, Version: version, Kind: ref.Kind}.String()) - } - // unification is done by the informer. - return keys +// AddIndexes adds composite resource related indexes to the supplied +// FieldIndexer. +func AddIndexes(ctx context.Context, fi client.FieldIndexer) error { + u := &kunstructured.Unstructured{} + return errors.Wrap(fi.IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs), "cannot add cache index for composed resources") } // IndexCompositeResourcesRefs assumes the passed object is a composite. It @@ -78,26 +61,3 @@ func IndexCompositeResourcesRefs(o client.Object) []string { func refKey(ns, name, kind, apiVersion string) string { return fmt.Sprintf("%s.%s.%s.%s", name, ns, kind, apiVersion) } - -// TODO(negz): Figure out a way to plumb this with controller-runtime v0.18.x -// style sources. 
- -// func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { -// return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { -// mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() -// key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) -// -// composites := kunstructured.UnstructuredList{} -// composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) -// if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { -// log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) -// return -// } -// -// // queue those composites for reconciliation -// for _, xr := range composites.Items { -// log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) -// q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) -// } -// } -// } diff --git a/internal/controller/apiextensions/definition/indexes_test.go b/internal/controller/apiextensions/definition/indexes_test.go index 3e725ddc093..40fc1315ea5 100644 --- a/internal/controller/apiextensions/definition/indexes_test.go +++ b/internal/controller/apiextensions/definition/indexes_test.go @@ -25,50 +25,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestIndexCompositeResourceRefGVKs(t *testing.T) { - type args struct { - object client.Object - } - tests := map[string]struct { - args args - want []string - }{ - "Nil": {args: args{object: nil}, want: nil}, - "NotUnstructured": {args: args{object: &corev1.Pod{}}, want: nil}, - "NoRefs": {args: args{object: &unstructured.Unstructured{ - Object: 
map[string]interface{}{ - "spec": map[string]interface{}{}, - }, - }}, want: []string{}}, - "References": {args: args{object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]interface{}{ - "resourceRefs": []interface{}{ - map[string]interface{}{ - "apiVersion": "nop.crossplane.io/v1alpha1", - "kind": "NopResource", - "name": "mr", - }, - map[string]interface{}{ - "apiVersion": "nop.example.org/v1alpha1", - "kind": "NopResource", - "name": "xr", - }, - }, - }, - }, - }}, want: []string{"nop.crossplane.io/v1alpha1, Kind=NopResource", "nop.example.org/v1alpha1, Kind=NopResource"}}, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := IndexCompositeResourceRefGVKs(tc.args.object) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nIndexCompositeResourceRefGVKs(...): -want, +got:\n%s", name, diff) - } - }) - } -} - func TestIndexCompositeResourcesRefs(t *testing.T) { type args struct { object client.Object diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 6c7b73d04a4..de9016e73d2 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -30,17 +30,11 @@ import ( kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" 
"github.com/crossplane/crossplane-runtime/pkg/connection" "github.com/crossplane/crossplane-runtime/pkg/controller" @@ -56,6 +50,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" + "github.com/crossplane/crossplane/internal/controller/apiextensions/composite/watch" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" "github.com/crossplane/crossplane/internal/controller/engine" "github.com/crossplane/crossplane/internal/features" @@ -72,6 +67,8 @@ const ( errApplyCRD = "cannot apply rendered composite resource CustomResourceDefinition" errUpdateStatus = "cannot update status of CompositeResourceDefinition" errStartController = "cannot start composite resource controller" + errStopController = "cannot stop composite resource controller" + errStartWatches = "cannot start composite resource controller watches" errAddIndex = "cannot add composite GVK index" errAddFinalizer = "cannot add composite resource finalizer" errRemoveFinalizer = "cannot remove composite resource finalizer" @@ -96,14 +93,38 @@ const ( ) // A ControllerEngine can start and stop Kubernetes controllers on demand. -type ControllerEngine interface { +type ControllerEngine interface { //nolint:interfacebloat // Only one method over the limit. 
+ Start(name string, o ...engine.ControllerOption) error + Stop(ctx context.Context, name string) error IsRunning(name string) bool - Create(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) - Start(name string, o kcontroller.Options, w ...engine.Watch) error - Stop(name string) - Err(name string) error + StartWatches(name string, ws ...engine.Watch) error + StopWatches(ctx context.Context, name string, keep ...schema.GroupVersionKind) error + RemoveUnwatchedInformers(ctx context.Context) error } +// A NopEngine does nothing. +type NopEngine struct{} + +// Start does nothing. +func (e *NopEngine) Start(_ string, _ ...engine.ControllerOption) error { return nil } + +// Stop does nothing. +func (e *NopEngine) Stop(_ context.Context, _ string) error { return nil } + +// IsRunning always returns true. +func (e *NopEngine) IsRunning(_ string) bool { return true } + +// StartWatches does nothing. +func (e *NopEngine) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// StopWatches does nothing. +func (e *NopEngine) StopWatches(_ context.Context, _ string, _ ...schema.GroupVersionKind) error { + return nil +} + +// RemoveUnwatchedInformers does nothing. +func (e *NopEngine) RemoveUnwatchedInformers(_ context.Context) error { return nil } + // A CRDRenderer renders a CompositeResourceDefinition's corresponding // CustomResourceDefinition. 
type CRDRenderer interface { @@ -125,24 +146,12 @@ func (fn CRDRenderFn) Render(d *v1.CompositeResourceDefinition) (*extv1.CustomRe func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { name := "defined/" + strings.ToLower(v1.CompositeResourceDefinitionGroupKind) - r := NewReconciler(mgr, + r := NewReconciler(NewClientApplicator(o.ControllerClient), WithLogger(o.Logger.WithValues("controller", name)), WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + WithControllerEngine(o.ControllerEngine), WithOptions(o)) - if o.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // Register a runnable regularly checking whether the watch composed - // resources are still referenced by composite resources. If not, the - // composed resource informer is stopped. - if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { - // Run every minute. - wait.UntilWithContext(ctx, r.xrInformers.cleanupComposedResourceInformers, time.Minute) - return nil - })); err != nil { - return errors.Wrap(err, errCannotAddInformerLoopToManager) - } - } - return ctrl.NewControllerManagedBy(mgr). Named(name). For(&v1.CompositeResourceDefinition{}). @@ -158,7 +167,6 @@ type ReconcilerOption func(*Reconciler) func WithLogger(log logging.Logger) ReconcilerOption { return func(r *Reconciler) { r.log = log - r.xrInformers.log = log } } @@ -189,60 +197,54 @@ func WithFinalizer(f resource.Finalizer) ReconcilerOption { // lifecycles of composite controllers. func WithControllerEngine(c ControllerEngine) ReconcilerOption { return func(r *Reconciler) { - r.composite.ControllerEngine = c + r.controller = c } } -// WithCRDRenderer specifies how the Reconciler should render an -// CompositeResourceDefinition's corresponding CustomResourceDefinition. -func WithCRDRenderer(c CRDRenderer) ReconcilerOption { +// WithFieldIndexer specifies the FieldIndexer that should be used to +// add indexes to the Reconciler's client. 
+func WithFieldIndexer(fi client.FieldIndexer) ReconcilerOption { return func(r *Reconciler) { - r.composite.CRDRenderer = c + r.indexer = fi } } -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { +// WithCRDRenderer specifies how the Reconciler should render an +// CompositeResourceDefinition's corresponding CustomResourceDefinition. +func WithCRDRenderer(c CRDRenderer) ReconcilerOption { return func(r *Reconciler) { - r.client = ca + r.composite.CRDRenderer = c } } type definition struct { CRDRenderer - ControllerEngine resource.Finalizer } -// NewReconciler returns a Reconciler of CompositeResourceDefinitions. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - - ca := engine.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()) +// NewClientApplicator returns a ClientApplicator suitable for use by the +// definition controller. +func NewClientApplicator(c client.Client) resource.ClientApplicator { + // TODO(negz): Can we avoid using this wrapper client? + // TODO(negz): Use server-side apply instead of a ClientApplicator. + uc := unstructured.NewClient(c) + return resource.ClientApplicator{ + Client: uc, + Applicator: resource.NewAPIUpdatingApplicator(uc), + } +} +// NewReconciler returns a Reconciler of CompositeResourceDefinitions. 
+func NewReconciler(ca resource.ClientApplicator, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - mgr: mgr, - - client: resource.ClientApplicator{ - Client: kube, - Applicator: resource.NewAPIUpdatingApplicator(kube), - }, + client: ca, composite: definition{ - CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), - ControllerEngine: engine.New(mgr), - Finalizer: resource.NewAPIFinalizer(kube, finalizer), + CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), + Finalizer: resource.NewAPIFinalizer(ca, finalizer), }, - xrInformers: composedResourceInformers{ - log: logging.NewNopLogger(), - cluster: mgr, - - gvkRoutedCache: ca, - cdCaches: make(map[schema.GroupVersionKind]cdCache), - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), - }, + controller: &NopEngine{}, log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -256,27 +258,21 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { f(r) } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // wrap the manager's cache to route requests to dynamically started - // informers for managed resources. - r.mgr = engine.WithGVKRoutedCache(ca, mgr) - } - return r } // A Reconciler reconciles CompositeResourceDefinitions. type Reconciler struct { - client resource.ClientApplicator - mgr manager.Manager + client resource.ClientApplicator + indexer client.FieldIndexer // Should add indexes to the client above. composite definition + controller ControllerEngine + log logging.Logger record event.Recorder - xrInformers composedResourceInformers - options apiextensionscontroller.Options } @@ -337,11 +333,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // the (presumably exceedingly rare) latter case we'll orphan // the CRD. if !meta.WasCreated(crd) || !metav1.IsControlledBy(crd, d) { - // It's likely that we've already stopped this - // controller on a previous reconcile, but we try again - // just in case. 
This is a no-op if the controller was - // already stopped. - r.stopCompositeController(d) + // It's likely that we've already stopped this controller on a + // previous reconcile, but we try again just in case. This is a + // no-op if the controller was already stopped. + if err := r.controller.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonTerminateXR, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource controller") if err := r.composite.RemoveFinalizer(ctx, d); err != nil { @@ -391,9 +390,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - // The controller should be stopped before the deletion of CRD - // so that it doesn't crash. - r.stopCompositeController(d) + // The controller must be stopped before the deletion of the CRD so that + // it doesn't crash. + if err := r.controller.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonTerminateXR, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -441,36 +444,29 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - if err := r.composite.Err(composite.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error", "error", err) - } - observed := d.Status.Controllers.CompositeResourceTypeRef desired := v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) - switch { - case observed.APIVersion != "" && observed != desired: - r.stopCompositeController(d) + if observed.APIVersion != "" && observed != desired { + if err := r.controller.Stop(ctx, 
composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonEstablishXR, err)) + return reconcile.Result{}, err + } log.Debug("Referenceable version changed; stopped composite resource controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) - case r.composite.IsRunning(composite.ControllerName(d.GetName())): + } + + if r.controller.IsRunning(composite.ControllerName(d.GetName())) { log.Debug("Composite resource controller is running") d.Status.SetConditions(v1.WatchingComposite()) return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) - default: - if err := r.composite.Err(composite.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error. Going to restart it", "error", err) - } else { - log.Debug("Composite resource controller is not running. Going to start it") - } } - ro := CompositeReconcilerOptions(r.options, d, r.client, r.log, r.record) + ro := r.CompositeReconcilerOptions(ctx, d) ck := resource.CompositeKind(d.GetCompositeGroupVersionKind()) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - ro = append(ro, composite.WithKindObserver(composite.KindObserverFunc(r.xrInformers.WatchComposedResources))) - } - cr := composite.NewReconciler(r.mgr, ck, ro...) + + cr := composite.NewReconciler(r.client, ck, ro...) 
ko := r.options.ForControllerRuntime() // Most controllers use this type of rate limiter to backoff requeues from 1 @@ -486,106 +482,75 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco u := &kunstructured.Unstructured{} u.SetGroupVersionKind(xrGVK) - name := composite.ControllerName(d.GetName()) - var ca cache.Cache - watches := []engine.Watch{ - engine.WatchFor(u, &handler.EnqueueRequestForObject{}), - // enqueue composites whenever a matching CompositionRevision is created - engine.WatchTriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}, handler.TypedFuncs[*v1.CompositionRevision]{ - CreateFunc: composite.EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), - })), + + // TODO(negz): Update CompositeReconcilerOptions to produce + // ControllerOptions instead? It bothers me that this is the only feature + // flagged block outside that method. + co := []engine.ControllerOption{engine.WithRuntimeOptions(ko)} + if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + // If realtime composition is enabled we'll start watches dynamically, + // so we want to garbage collect watches that aren't used. + gc := watch.NewGarbageCollector(name, resource.CompositeKind(xrGVK), r.client, r.controller, + watch.WithLogger(log), + watch.AlwaysKeep(xrGVK, v1.CompositionRevisionGroupVersionKind), + ) + co = append(co, engine.WithWatchGarbageCollector(gc)) } - // TODO(negz): I can't find a great way to plumb this. We now need to pass - // the handler when creating the source (i.e. the xrInformers). The - // xrInformers is designed to handle multiple types, though. Given I plan to - // try refactor realtime compositions to make use of new controller-runtime - // functionality around stopping informers, I'm going to just comment it out - // rather than spend time getting it working. 
- - // if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // // enqueue XRs that when a relevant MR is updated - // watches = append(watches, engine.WatchTriggeredBy(&r.xrInformers, handler.TypedFuncs[*kunstructured.Unstructured]{ - // UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent[*kunstructured.Unstructured], q workqueue.RateLimitingInterface) { - // enqueueXRsForMR(ca, xrGVK, log)(ctx, ev, q) - // }, - // })) - // } - - c, err := r.composite.Create(name, ko, watches...) - if err != nil { + if err := r.controller.Start(name, co...); err != nil { log.Debug(errStartController, "error", err) err = errors.Wrap(err, errStartController) r.record.Event(d, event.Warning(reasonEstablishXR, err)) return reconcile.Result{}, err } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - ca = c.GetCache() - if err := ca.IndexField(ctx, u, compositeResourceRefGVKsIndex, IndexCompositeResourceRefGVKs); err != nil { - log.Debug(errAddIndex, "error", err) - // Nothing we can do. At worst, we won't have realtime updates. - } - if err := ca.IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs); err != nil { - log.Debug(errAddIndex, "error", err) - // Nothing we can do. At worst, we won't have realtime updates. - } - } - - if err := c.Start(context.Background()); err != nil { //nolint:contextcheck // the controller actually runs in the background. 
- log.Debug(errStartController, "error", err) - err = errors.Wrap(err, errStartController) + if err := r.controller.StartWatches(name, + engine.WatchFor(u, &handler.EnqueueRequestForObject{}), + engine.WatchFor(&v1.CompositionRevision{}, EnqueueForCompositionRevision(resource.CompositeKind(xrGVK), r.client, log)), + ); err != nil { + log.Debug(errStartWatches, "error", err) + err = errors.Wrap(err, errStartWatches) r.record.Event(d, event.Warning(reasonEstablishXR, err)) return reconcile.Result{}, err } - log.Debug("(Re)started composite resource controller") - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.RegisterComposite(xrGVK, ca) - } + log.Debug("Started composite resource controller") d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) d.Status.SetConditions(v1.WatchingComposite()) return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) } -func (r *Reconciler) stopCompositeController(d *v1.CompositeResourceDefinition) { - r.composite.Stop(composite.ControllerName(d.GetName())) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.UnregisterComposite(d.GetCompositeGroupVersionKind()) - } -} - // CompositeReconcilerOptions builds the options for a composite resource // reconciler. The options vary based on the supplied feature flags. -func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.CompositeResourceDefinition, c client.Client, l logging.Logger, e event.Recorder) []composite.ReconcilerOption { +func (r *Reconciler) CompositeReconcilerOptions(ctx context.Context, d *v1.CompositeResourceDefinition) []composite.ReconcilerOption { // The default set of reconciler options when no feature flags are enabled. 
o := []composite.ReconcilerOption{ - composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(c, d.GetConnectionSecretKeys())), + composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(r.client, d.GetConnectionSecretKeys())), composite.WithCompositionSelector(composite.NewCompositionSelectorChain( - composite.NewEnforcedCompositionSelector(*d, e), - composite.NewAPIDefaultCompositionSelector(c, *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), e), - composite.NewAPILabelSelectorResolver(c), + composite.NewEnforcedCompositionSelector(*d, r.record), + composite.NewAPIDefaultCompositionSelector(r.client, *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), r.record), + composite.NewAPILabelSelectorResolver(r.client), )), - composite.WithLogger(l.WithValues("controller", composite.ControllerName(d.GetName()))), - composite.WithRecorder(e.WithAnnotations("controller", composite.ControllerName(d.GetName()))), - composite.WithPollInterval(co.PollInterval), + composite.WithLogger(r.log.WithValues("controller", composite.ControllerName(d.GetName()))), + composite.WithRecorder(r.record.WithAnnotations("controller", composite.ControllerName(d.GetName()))), + composite.WithPollInterval(r.options.PollInterval), } // We only want to enable Composition environment support if the relevant // feature flag is enabled. Otherwise we will default to noop selector and // fetcher that will always return nil. All environment features are // subsequently skipped if the environment is nil. 
- if co.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { + if r.options.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { o = append(o, - composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(c)), - composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(c))) + composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(r.client)), + composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(r.client))) } // If external secret stores aren't enabled we just fetch connection details // from Kubernetes secrets. - var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(c) + var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(r.client) // We only want to enable ExternalSecretStore support if the relevant // feature flag is enabled. Otherwise, we start the XR reconcilers with @@ -593,48 +558,48 @@ func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.Compos // We also add a new Configurator for ExternalSecretStore which basically // reflects PublishConnectionDetailsWithStoreConfigRef in Composition to // the composite resource. 
- if co.Features.Enabled(features.EnableAlphaExternalSecretStores) { + if r.options.Features.Enabled(features.EnableAlphaExternalSecretStores) { pc := []managed.ConnectionPublisher{ - composite.NewAPIFilteredSecretPublisher(c, d.GetConnectionSecretKeys()), - composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(c, v1alpha1.StoreConfigGroupVersionKind, - connection.WithTLSConfig(co.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), + composite.NewAPIFilteredSecretPublisher(r.client, d.GetConnectionSecretKeys()), + composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(r.client, v1alpha1.StoreConfigGroupVersionKind, + connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), } // If external secret stores are enabled we need to support fetching // connection details from both secrets and external stores. fetcher = composite.ConnectionDetailsFetcherChain{ - composite.NewSecretConnectionDetailsFetcher(c), - connection.NewDetailsManager(c, v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(co.ESSOptions.TLSConfig)), + composite.NewSecretConnectionDetailsFetcher(r.client), + connection.NewDetailsManager(r.client, v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), } cc := composite.NewConfiguratorChain( - composite.NewAPINamingConfigurator(c), - composite.NewAPIConfigurator(c), - composite.NewSecretStoreConnectionDetailsConfigurator(c), + composite.NewAPINamingConfigurator(r.client), + composite.NewAPIConfigurator(r.client), + composite.NewSecretStoreConnectionDetailsConfigurator(r.client), ) o = append(o, composite.WithConnectionPublishers(pc...), composite.WithConfigurator(cc), - composite.WithComposer(composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)))) + composite.WithComposer(composite.NewPTComposer(r.client, composite.WithComposedConnectionDetailsFetcher(fetcher)))) } // If Composition Functions are enabled 
we use two different Composer // implementations. One supports P&T (aka 'Resources mode') and the other // Functions (aka 'Pipeline mode'). - if co.Features.Enabled(features.EnableBetaCompositionFunctions) { - ptc := composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)) + if r.options.Features.Enabled(features.EnableBetaCompositionFunctions) { + ptc := composite.NewPTComposer(r.client, composite.WithComposedConnectionDetailsFetcher(fetcher)) fcopts := []composite.FunctionComposerOption{ - composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(c, fetcher)), + composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.client, fetcher)), composite.WithCompositeConnectionDetailsFetcher(fetcher), } - if co.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { - fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(c))) + if r.options.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { + fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(r.client))) } - fc := composite.NewFunctionComposer(c, co.FunctionRunner, fcopts...) + fc := composite.NewFunctionComposer(r.client, r.options.FunctionRunner, fcopts...) // Note that if external secret stores are enabled this will supersede // the WithComposer option specified in that block. @@ -657,5 +622,22 @@ func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.Compos }))) } + // If realtime compositions are enabled we pass the ControllerEngine to the + // XR reconciler so that it can start watches for composed resources. 
+ if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + gvk := d.GetCompositeGroupVersionKind() + u := &kunstructured.Unstructured{} + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) + + // The below EnqueueCompositeResources handler uses this index. + if err := r.indexer.IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs); err != nil { + r.log.Debug(errAddIndex, "error", err) + } + + h := EnqueueCompositeResources(resource.CompositeKind(d.GetCompositeGroupVersionKind()), r.client, r.log) + o = append(o, composite.WithWatchStarter(composite.ControllerName(d.GetName()), h, r.controller)) + } + return o } diff --git a/internal/controller/apiextensions/definition/reconciler_test.go b/internal/controller/apiextensions/definition/reconciler_test.go index e89f2322ec9..9d9d2c31d84 100644 --- a/internal/controller/apiextensions/definition/reconciler_test.go +++ b/internal/controller/apiextensions/definition/reconciler_test.go @@ -20,63 +20,52 @@ import ( "context" "testing" - "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlconfig "sigs.k8s.io/controller-runtime/pkg/config" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/resource" - 
"github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" "github.com/crossplane/crossplane/internal/controller/engine" - "github.com/crossplane/crossplane/internal/features" +) + +var ( + _ ControllerEngine = &MockEngine{} + _ ControllerEngine = &NopEngine{} ) type MockEngine struct { + MockStart func(name string, o ...engine.ControllerOption) error + MockStop func(ctx context.Context, name string) error + MockIsRunning func(name string) bool + MockStartWatches func(name string, ws ...engine.Watch) error + ControllerEngine - MockIsRunning func(name string) bool - MockCreate func(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) - MockStart func(name string, o kcontroller.Options, w ...engine.Watch) error - MockStop func(name string) - MockErr func(name string) error } func (m *MockEngine) IsRunning(name string) bool { return m.MockIsRunning(name) } -func (m *MockEngine) Create(name string, o kcontroller.Options, w ...engine.Watch) (engine.NamedController, error) { - return m.MockCreate(name, o, w...) +func (m *MockEngine) Start(name string, o ...engine.ControllerOption) error { + return m.MockStart(name, o...) } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...engine.Watch) error { - return m.MockStart(name, o, w...) +func (m *MockEngine) Stop(ctx context.Context, name string) error { + return m.MockStop(ctx, name) } -func (m *MockEngine) Stop(name string) { - m.MockStop(name) -} - -func (m *MockEngine) Err(name string) error { - return m.MockErr(name) +func (m *MockEngine) StartWatches(name string, ws ...engine.Watch) error { + return m.MockStartWatches(name, ws...) 
} func TestReconcile(t *testing.T) { @@ -86,7 +75,7 @@ func TestReconcile(t *testing.T) { ctrlr := true type args struct { - mgr manager.Manager + ca resource.ClientApplicator opts []ReconcilerOption } type want struct { @@ -102,13 +91,10 @@ func TestReconcile(t *testing.T) { "CompositeResourceDefinitionNotFound": { reason: "We should not return an error if the CompositeResourceDefinition was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, want: want{ @@ -118,13 +104,10 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceDefinitionError": { reason: "We should return any other error encountered while getting a CompositeResourceDefinition.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, want: want{ @@ -134,13 +117,12 @@ func TestReconcile(t *testing.T) { "RenderCustomResourceDefinitionError": { reason: "We should return any error we encounter rendering a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return nil, errBoom })), @@ -153,18 +135,17 @@ func TestReconcile(t *testing.T) { 
"SetTerminatingConditionError": { reason: "We should return any error we encounter while setting the terminating status condition.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + d := o.(*v1.CompositeResourceDefinition) + d.SetDeletionTimestamp(&now) + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*v1.CompositeResourceDefinition) - d.SetDeletionTimestamp(&now) - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -177,24 +158,23 @@ func TestReconcile(t *testing.T) { "GetCustomResourceDefinitionError": { reason: "We should return any error we encounter getting a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + return errBoom + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - return errBoom - } - return nil - }), - MockStatusUpdate: 
test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -207,21 +187,20 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "We should return any error we encounter while removing a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -237,21 +216,20 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue when deleted if we successfully cleaned up our CRD and removed our finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: 
&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -267,29 +245,28 @@ func TestReconcile(t *testing.T) { "DeleteAllCustomResourcesError": { reason: "We should return any error we encounter while deleting all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(errBoom), - MockStatusUpdate: 
test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -302,30 +279,29 @@ func TestReconcile(t *testing.T) { "ListCustomResourcesError": { reason: "We should return any error we encounter while listing all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -338,36 
+314,35 @@ func TestReconcile(t *testing.T) { "WaitForDeleteAllOf": { reason: "We should record the pending deletion of defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ 
*v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -380,31 +355,30 @@ func TestReconcile(t *testing.T) { "DeleteCustomResourceDefinitionError": { reason: "We should return any error we encounter while deleting the CRD we created.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ 
-417,42 +391,41 @@ func TestReconcile(t *testing.T) { "SuccessfulCleanup": { reason: "We should requeue to remove our finalizer once we've cleaned up our defined resources and CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.SetUID(owner) - want.SetDeletionTimestamp(&now) - want.Status.SetConditions(v1.TerminatingComposite()) - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) - } + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) 
error { + want := &v1.CompositeResourceDefinition{} + want.SetUID(owner) + want.SetDeletionTimestamp(&now) + want.Status.SetConditions(v1.TerminatingComposite()) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) + } - return nil - }), - }, - }), + return nil + }), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -465,13 +438,12 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error we encounter while adding a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -487,16 +459,15 @@ func TestReconcile(t *testing.T) { "ApplyCustomResourceDefinitionError": { reason: "We should return any error we encounter while applying our CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return errBoom - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return errBoom }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return 
&extv1.CustomResourceDefinition{}, nil })), @@ -512,16 +483,15 @@ func TestReconcile(t *testing.T) { "CustomResourceDefinitionIsNotEstablished": { reason: "We should requeue if we're waiting for a newly created CRD to become established.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -537,29 +507,15 @@ func TestReconcile(t *testing.T) { "CreateControllerError": { reason: "We should return any error we encounter while starting our controller.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: 
resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -574,9 +530,8 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return false }, - MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - return nil, errBoom + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return errBoom }, }), }, @@ -589,38 +544,24 @@ func TestReconcile(t *testing.T) { "SuccessfulStart": { reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) - }, - }, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + ca: resource.ClientApplicator{ + Client: &test.MockClient{ 
+ MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -634,23 +575,9 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockIsRunning: func(_ string) bool { return false }, - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - return mockNamedController{ - MockStart: func(_ context.Context) error { return nil }, - MockGetCache: func() cache.Cache { - return &mockCache{ - IndexFieldFn: func(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error { - return nil - }, - WaitForCacheSyncFn: func(_ context.Context) bool { - return true - }, - } - }, - }, nil - }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, }), }, }, @@ -661,51 +588,37 @@ func TestReconcile(t *testing.T) { "SuccessfulUpdateControllerVersion": { reason: "We should return without requeueing if we successfully ensured our CRD exists, the old controller stopped, and the new one started.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ client.ObjectList, _ 
...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) - }, - }, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - d := obj.(*v1.CompositeResourceDefinition) - d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "old"} - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - want.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "new"} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + d := obj.(*v1.CompositeResourceDefinition) + d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "old"} + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Spec.Versions = 
[]v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + want.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "new"} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -719,23 +632,10 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - return mockNamedController{ - MockStart: func(_ context.Context) error { return nil }, - MockGetCache: func() cache.Cache { - return &mockCache{ - IndexFieldFn: func(_ context.Context, _ client.Object, _ string, _ client.IndexerFunc) error { - return nil - }, - WaitForCacheSyncFn: func(_ context.Context) bool { - return true - }, - } - }, - }, nil - }, - MockStop: func(_ string) {}, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStop: func(_ context.Context, _ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, }), }, }, @@ -746,38 +646,24 @@ func TestReconcile(t *testing.T) { "NotRestartingWithoutVersionChange": { reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(_ context.Context, _ 
client.ObjectList, _ ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) - }, - }, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -792,10 +678,9 @@ func TestReconcile(t *testing.T) { }}), WithControllerEngine(&MockEngine{ MockIsRunning: func(_ string) bool { return true }, - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. 
- MockCreate: func(_ string, _ kcontroller.Options, _ ...engine.Watch) (engine.NamedController, error) { - t.Errorf("MockCreate should not be called") - return nil, nil + MockStart: func(_ string, _ ...engine.ControllerOption) error { + t.Errorf("MockStart should not be called") + return nil }, }), }, @@ -806,105 +691,17 @@ func TestReconcile(t *testing.T) { }, } - // Run every test with and without the realtime compositions feature. - rtc := apiextensionscontroller.Options{Options: controller.DefaultOptions()} - rtc.Features.Enable(features.EnableAlphaRealtimeCompositions) - - type mode struct { - name string - extra []ReconcilerOption - } - for _, m := range []mode{ - {name: "Default"}, - {name: string(features.EnableAlphaRealtimeCompositions), extra: []ReconcilerOption{WithOptions(rtc)}}, - } { - t.Run(m.name, func(t *testing.T) { - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, m.extra...)...) - got, err := r.Reconcile(context.Background(), reconcile.Request{}) + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + r := NewReconciler(tc.args.ca, tc.args.opts...) 
+ got, err := r.Reconcile(context.Background(), reconcile.Request{}) - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) - } - }) + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) } }) } } - -type mockNamedController struct { - MockStart func(_ context.Context) error - MockGetCache func() cache.Cache -} - -func (m mockNamedController) Start(ctx context.Context) error { - return m.MockStart(ctx) -} - -func (m mockNamedController) GetCache() cache.Cache { - return m.MockGetCache() -} - -type mockManager struct { - manager.Manager - - GetCacheFn func() cache.Cache - GetClientFn func() client.Client - GetSchemeFn func() *runtime.Scheme - GetRESTMapperFn func() meta.RESTMapper - GetConfigFn func() *rest.Config - GetLoggerFn func() logr.Logger - GetControllerOptionsFn func() ctrlconfig.Controller -} - -func (m *mockManager) GetCache() cache.Cache { - return m.GetCacheFn() -} - -func (m *mockManager) GetClient() client.Client { - return m.GetClientFn() -} - -func (m *mockManager) GetScheme() *runtime.Scheme { - return m.GetSchemeFn() -} - -func (m *mockManager) GetRESTMapper() meta.RESTMapper { - return m.GetRESTMapperFn() -} - -func (m *mockManager) GetConfig() *rest.Config { - return m.GetConfigFn() -} - -func (m *mockManager) GetLogger() logr.Logger { - return m.GetLoggerFn() -} - -func (m *mockManager) GetControllerOptions() ctrlconfig.Controller { - return m.GetControllerOptionsFn() -} - -type mockCache struct { - cache.Cache - - ListFn func(_ context.Context, 
list client.ObjectList, opts ...client.ListOption) error - IndexFieldFn func(_ context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error - WaitForCacheSyncFn func(_ context.Context) bool -} - -func (m *mockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return m.ListFn(ctx, list, opts...) -} - -func (m *mockCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - return m.IndexFieldFn(ctx, obj, field, extractValue) -} - -func (m *mockCache) WaitForCacheSync(ctx context.Context) bool { - return m.WaitForCacheSyncFn(ctx) -} diff --git a/internal/controller/apiextensions/offered/reconciler.go b/internal/controller/apiextensions/offered/reconciler.go index a5485ac904b..79098196393 100644 --- a/internal/controller/apiextensions/offered/reconciler.go +++ b/internal/controller/apiextensions/offered/reconciler.go @@ -30,9 +30,8 @@ import ( kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crossplane/crossplane-runtime/pkg/connection" @@ -68,6 +67,8 @@ const ( errApplyCRD = "cannot apply rendered composite resource claim CustomResourceDefinition" errUpdateStatus = "cannot update status of CompositeResourceDefinition" errStartController = "cannot start composite resource claim controller" + errStopController = "cannot stop composite resource claim controller" + errStartWatches = "cannot start composite resource claim controller watches" errAddFinalizer = "cannot add composite resource claim finalizer" errRemoveFinalizer = "cannot remove composite resource claim finalizer" errDeleteCRD = "cannot delete 
composite resource claim CustomResourceDefinition" @@ -90,12 +91,27 @@ const ( // A ControllerEngine can start and stop Kubernetes controllers on demand. type ControllerEngine interface { + Start(name string, o ...engine.ControllerOption) error + Stop(ctx context.Context, name string) error IsRunning(name string) bool - Start(name string, o kcontroller.Options, w ...engine.Watch) error - Stop(name string) - Err(name string) error + StartWatches(name string, ws ...engine.Watch) error } +// A NopEngine does nothing. +type NopEngine struct{} + +// Start does nothing. +func (e *NopEngine) Start(_ string, _ ...engine.ControllerOption) error { return nil } + +// Stop does nothing. +func (e *NopEngine) Stop(_ context.Context, _ string) error { return nil } + +// IsRunning always returns true. +func (e *NopEngine) IsRunning(_ string) bool { return true } + +// StartWatches does nothing. +func (e *NopEngine) StartWatches(_ string, _ ...engine.Watch) error { return nil } + // A CRDRenderer renders a CompositeResourceDefinition's corresponding // CustomResourceDefinition. type CRDRenderer interface { @@ -118,9 +134,10 @@ func (fn CRDRenderFn) Render(d *v1.CompositeResourceDefinition) (*extv1.CustomRe func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { name := "offered/" + strings.ToLower(v1.CompositeResourceDefinitionGroupKind) - r := NewReconciler(mgr, + r := NewReconciler(NewClientApplicator(o.ControllerClient), WithLogger(o.Logger.WithValues("controller", name)), WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + WithControllerEngine(o.ControllerEngine), WithOptions(o)) return ctrl.NewControllerManagedBy(mgr). @@ -169,7 +186,7 @@ func WithFinalizer(f resource.Finalizer) ReconcilerOption { // lifecycles of claim controllers. 
func WithControllerEngine(c ControllerEngine) ReconcilerOption { return func(r *Reconciler) { - r.claim.ControllerEngine = c + r.controller = c } } @@ -181,32 +198,30 @@ func WithCRDRenderer(c CRDRenderer) ReconcilerOption { } } -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca +// NewClientApplicator returns a ClientApplicator suitable for use by the +// offered controller. +func NewClientApplicator(c client.Client) resource.ClientApplicator { + // TODO(negz): Can we avoid using this wrapper client? + // TODO(negz): Use server-side apply instead of a ClientApplicator. + uc := unstructured.NewClient(c) + return resource.ClientApplicator{ + Client: uc, + Applicator: resource.NewAPIUpdatingApplicator(uc), } } // NewReconciler returns a Reconciler of CompositeResourceDefinitions. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - +func NewReconciler(ca resource.ClientApplicator, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - mgr: mgr, - - client: resource.ClientApplicator{ - Client: kube, - Applicator: resource.NewAPIUpdatingApplicator(kube), - }, + client: ca, claim: definition{ - CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), - ControllerEngine: engine.New(mgr), - Finalizer: resource.NewAPIFinalizer(kube, finalizer), + CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), + Finalizer: resource.NewAPIFinalizer(ca, finalizer), }, + controller: &NopEngine{}, + log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -223,17 +238,17 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { type definition struct { CRDRenderer - ControllerEngine resource.Finalizer } // A Reconciler reconciles CompositeResourceDefinitions. 
type Reconciler struct { - mgr manager.Manager client resource.ClientApplicator claim definition + controller ControllerEngine + log logging.Logger record event.Recorder @@ -294,11 +309,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // the (presumably exceedingly rare) latter case we'll orphan // the CRD. if !meta.WasCreated(crd) || !metav1.IsControlledBy(crd, d) { - // It's likely that we've already stopped this - // controller on a previous reconcile, but we try again - // just in case. This is a no-op if the controller was - // already stopped. - r.claim.Stop(claim.ControllerName(d.GetName())) + // It's likely that we've already stopped this controller on a + // previous reconcile, but we try again just in case. This is a + // no-op if the controller was already stopped. + if err := r.controller.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonRedactXRC, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource claim controller") if err := r.claim.RemoveFinalizer(ctx, d); err != nil { @@ -349,9 +367,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - // The controller should be stopped before the deletion of CRD - // so that it doesn't crash. - r.claim.Stop(claim.ControllerName(d.GetName())) + // The controller should be stopped before the deletion of CRD so that + // it doesn't crash. 
+ if err := r.controller.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonRedactXRC, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource claim controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -427,41 +449,53 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig))))) } - cr := claim.NewReconciler(r.mgr, + cr := claim.NewReconciler(r.client, resource.CompositeClaimKind(d.GetClaimGroupVersionKind()), resource.CompositeKind(d.GetCompositeGroupVersionKind()), o...) ko := r.options.ForControllerRuntime() ko.Reconciler = ratelimiter.NewReconciler(claim.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) - if err := r.claim.Err(claim.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error", "error", err) - } - observed := d.Status.Controllers.CompositeResourceClaimTypeRef desired := v1.TypeReferenceTo(d.GetClaimGroupVersionKind()) if observed.APIVersion != "" && observed != desired { - r.claim.Stop(claim.ControllerName(d.GetName())) + if err := r.controller.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonOfferXRC, err)) + return reconcile.Result{}, err + } log.Debug("Referenceable version changed; stopped composite resource claim controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) } + if r.controller.IsRunning(claim.ControllerName(d.GetName())) { + log.Debug("Composite resource claim controller is running") + d.Status.SetConditions(v1.WatchingClaim()) + return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), 
errUpdateStatus) + } + cm := &kunstructured.Unstructured{} cm.SetGroupVersionKind(d.GetClaimGroupVersionKind()) cp := &kunstructured.Unstructured{} cp.SetGroupVersionKind(d.GetCompositeGroupVersionKind()) - if err := r.claim.Start(claim.ControllerName(d.GetName()), ko, + if err := r.controller.Start(claim.ControllerName(d.GetName()), engine.WithRuntimeOptions(ko)); err != nil { + err = errors.Wrap(err, errStartController) + r.record.Event(d, event.Warning(reasonOfferXRC, err)) + return reconcile.Result{}, err + } + log.Debug("Started composite resource claim controller") + + if err := r.controller.StartWatches(claim.ControllerName(d.GetName()), engine.WatchFor(cm, &handler.EnqueueRequestForObject{}), engine.WatchFor(cp, &EnqueueRequestForClaim{}), ); err != nil { - err = errors.Wrap(err, errStartController) + err = errors.Wrap(err, errStartWatches) r.record.Event(d, event.Warning(reasonOfferXRC, err)) return reconcile.Result{}, err } - log.Debug("(Re)started composite resource claim controller") d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReferenceTo(d.GetClaimGroupVersionKind()) d.Status.SetConditions(v1.WatchingClaim()) diff --git a/internal/controller/apiextensions/offered/reconciler_test.go b/internal/controller/apiextensions/offered/reconciler_test.go index 1be2ad2ad15..13da8c31a29 100644 --- a/internal/controller/apiextensions/offered/reconciler_test.go +++ b/internal/controller/apiextensions/offered/reconciler_test.go @@ -29,40 +29,44 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource" - 
"github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/controller/engine" ) -var _ ControllerEngine = &MockEngine{} - type MockEngine struct { - ControllerEngine - MockStart func(name string, o kcontroller.Options, w ...engine.Watch) error - MockStop func(name string) - MockErr func(name string) error + MockStart func(name string, o ...engine.ControllerOption) error + MockStop func(ctx context.Context, name string) error + MockIsRunning func(name string) bool + MockStartWatches func(name string, ws ...engine.Watch) error +} + +var ( + _ ControllerEngine = &MockEngine{} + _ ControllerEngine = &NopEngine{} +) + +func (m *MockEngine) Start(name string, o ...engine.ControllerOption) error { + return m.MockStart(name, o...) } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...engine.Watch) error { - return m.MockStart(name, o, w...) +func (m *MockEngine) Stop(ctx context.Context, name string) error { + return m.MockStop(ctx, name) } -func (m *MockEngine) Stop(name string) { - m.MockStop(name) +func (m *MockEngine) IsRunning(name string) bool { + return m.MockIsRunning(name) } -func (m *MockEngine) Err(name string) error { - return m.MockErr(name) +func (m *MockEngine) StartWatches(name string, ws ...engine.Watch) error { + return m.MockStartWatches(name, ws...) 
} func TestReconcile(t *testing.T) { @@ -73,7 +77,7 @@ func TestReconcile(t *testing.T) { ctrlr := true type args struct { - mgr manager.Manager + ca resource.ClientApplicator opts []ReconcilerOption } type want struct { @@ -89,13 +93,10 @@ func TestReconcile(t *testing.T) { "CompositeResourceDefinitionNotFound": { reason: "We should not return an error if the CompositeResourceDefinition was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, want: want{ @@ -105,13 +106,10 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceDefinitionError": { reason: "We should return any other error encountered while getting a CompositeResourceDefinition.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, want: want{ @@ -121,13 +119,12 @@ func TestReconcile(t *testing.T) { "RenderCompositeResourceDefinitionError": { reason: "We should return any error we encounter while rendering a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return nil, errBoom })), @@ -140,18 +137,17 @@ func TestReconcile(t *testing.T) { 
"SetTerminatingConditionError": { reason: "We should return any error we encounter while setting the terminating status condition.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + d := o.(*v1.CompositeResourceDefinition) + d.SetDeletionTimestamp(&now) + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*v1.CompositeResourceDefinition) - d.SetDeletionTimestamp(&now) - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -164,24 +160,23 @@ func TestReconcile(t *testing.T) { "GetCustomResourceDefinitionError": { reason: "We should return any error we encounter while getting a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + return errBoom + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - return errBoom - } - return nil - }), - MockStatusUpdate: 
test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -194,24 +189,26 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "We should return any error we encounter while removing a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), WithFinalizer(resource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }}), @@ -224,24 +221,26 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue when deleted if we successfully cleaned up our CRD and removed our finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := 
v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), WithFinalizer(resource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }}), @@ -254,29 +253,28 @@ func TestReconcile(t *testing.T) { "ListCustomResourcesError": { reason: "We should return any error we encounter while listing all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case 
*v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -289,36 +287,35 @@ func TestReconcile(t *testing.T) { "DeleteCustomResourcesError": { reason: "We should return any error we encounter while deleting defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - 
d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -331,36 +328,35 @@ func TestReconcile(t *testing.T) { "SuccessfulDeleteCustomResources": { reason: "We should requeue to ensure our defined resources are gone before we remove our CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) 
error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -373,35 +369,37 @@ func TestReconcile(t *testing.T) { "DeleteCustomResourceDefinitionError": { reason: "We should return any error we encounter while deleting the CRD we created.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{}, - } - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{}, } - return nil - 
}), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), }, }, want: want{ @@ -411,44 +409,46 @@ func TestReconcile(t *testing.T) { "SuccessfulCleanup": { reason: "We should requeue to remove our finalizer once we've cleaned up our defined resources and CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.SetUID(owner) - want.SetDeletionTimestamp(&now) - 
want.Status.SetConditions(v1.TerminatingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.SetUID(owner) + want.SetDeletionTimestamp(&now) + want.Status.SetConditions(v1.TerminatingClaim()) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) - } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) + } - return nil - }), - }, - }), + return nil + }), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), }, }, want: want{ @@ -458,13 +458,12 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error we encounter while adding a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) 
(*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -480,16 +479,15 @@ func TestReconcile(t *testing.T) { "ApplyCRDError": { reason: "We should return any error we encounter while applying our CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return errBoom - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return errBoom }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -505,16 +503,15 @@ func TestReconcile(t *testing.T) { "CustomResourceDefinitionIsNotEstablished": { reason: "We should requeue if we're waiting for a newly created CRD to become established.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -530,16 +527,15 @@ func TestReconcile(t *testing.T) { "StartControllerError": { reason: "We 
should return any error we encounter while starting our controller.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -553,8 +549,8 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return errBoom }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return errBoom }, }), }, }, @@ -565,25 +561,24 @@ func TestReconcile(t *testing.T) { "SuccessfulStart": { reason: "We should not requeue if we successfully ensured our CRD exists and controller is started.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + 
want.Status.SetConditions(v1.WatchingClaim()) - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -597,8 +592,9 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return errBoom }, // This error should only be logged. - MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, }, ), }, @@ -610,40 +606,39 @@ func TestReconcile(t *testing.T) { "SuccessfulUpdateControllerVersion": { reason: "We should not requeue if we successfully ensured our CRD exists, the old controller stopped, and the new one started.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - d := obj.(*v1.CompositeResourceDefinition) - d.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} - d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: 
"old"} - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} - want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - want.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "new"} - want.Status.SetConditions(v1.WatchingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + d := obj.(*v1.CompositeResourceDefinition) + d.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} + d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "old"} + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} + want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + want.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "new"} + want.Status.SetConditions(v1.WatchingClaim()) - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ 
*v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -657,9 +652,10 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...engine.Watch) error { return nil }, - MockStop: func(_ string) {}, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStop: func(_ context.Context, _ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, }), }, }, @@ -671,7 +667,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, WithLogger(testLog))...) + r := NewReconciler(tc.args.ca, append(tc.args.opts, WithLogger(testLog))...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { diff --git a/internal/controller/engine/cache.go b/internal/controller/engine/cache.go index b8264b9e8d8..7dc99dee776 100644 --- a/internal/controller/engine/cache.go +++ b/internal/controller/engine/cache.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Crossplane Authors. +Copyright 2024 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,9 +21,10 @@ import ( "strings" "sync" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -31,275 +32,164 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// GVKRoutedCache is a cache that routes requests by GVK to other caches. -type GVKRoutedCache struct { - scheme *runtime.Scheme - - fallback cache.Cache - - lock sync.RWMutex - delegates map[schema.GroupVersionKind]cache.Cache -} - -// NewGVKRoutedCache returns a new routed cache. -func NewGVKRoutedCache(scheme *runtime.Scheme, fallback cache.Cache) *GVKRoutedCache { - return &GVKRoutedCache{ - scheme: scheme, - fallback: fallback, - delegates: make(map[schema.GroupVersionKind]cache.Cache), - } -} +var ( + _ cache.Cache = &InformerTrackingCache{} + _ TrackingInformers = &InformerTrackingCache{} +) -var _ cache.Cache = &GVKRoutedCache{} +// TODO(negz): Propose adding a method like ActiveInformers to the upstream +// Informers implementation. This would allow us to remove this wrapper. -// AddDelegate adds a delegated cache for a given GVK. -func (c *GVKRoutedCache) AddDelegate(gvk schema.GroupVersionKind, delegate cache.Cache) { - c.lock.Lock() - defer c.lock.Unlock() +// An InformerTrackingCache wraps a cache.Cache and keeps track of what GVKs it +// has started informers for. It takes a blocking lock whenever a new informer +// is started or stopped, but so does the standard controller-runtime Cache +// implementation. +type InformerTrackingCache struct { + // The wrapped cache. + cache.Cache - c.delegates[gvk] = delegate -} - -// RemoveDelegate removes a delegated cache for a given GVK. 
-func (c *GVKRoutedCache) RemoveDelegate(gvk schema.GroupVersionKind) { - c.lock.Lock() - defer c.lock.Unlock() + scheme *runtime.Scheme - delete(c.delegates, gvk) + mx sync.RWMutex + active map[schema.GroupVersionKind]bool } -// Get retrieves an object for a given ObjectKey backed by a cache. -func (c *GVKRoutedCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.Get(ctx, key, obj, opts...) +// TrackInformers wraps the supplied cache, adding a method to query which +// informers are active. +func TrackInformers(c cache.Cache, s *runtime.Scheme) *InformerTrackingCache { + return &InformerTrackingCache{ + Cache: c, + scheme: s, + active: make(map[schema.GroupVersionKind]bool), } - - return c.fallback.Get(ctx, key, obj, opts...) } -// List lists objects for a given ObjectList backed by a cache. -func (c *GVKRoutedCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - gvk, err := apiutil.GVKForObject(list, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", list, err) - } - - if !strings.HasSuffix(gvk.Kind, "List") { - // following controller-runtime here which does not support non - // List types. - return errors.Errorf("non-list type %T (kind %q) passed as output", list, gvk) - } - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") +// ActiveInformers returns the GVKs of the informers believed to currently be +// active. The InformerTrackingCache considers an informer to become active when +// a caller calls Get, List, or one of the GetInformer methods. It considers an +// informer to become inactive when a caller calls the RemoveInformer method. 
+func (c *InformerTrackingCache) ActiveInformers() []schema.GroupVersionKind { + c.mx.RLock() + defer c.mx.RUnlock() - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.List(ctx, list, opts...) + out := make([]schema.GroupVersionKind, 0, len(c.active)) + for gvk := range c.active { + out = append(out, gvk) } - - return c.fallback.List(ctx, list, opts...) + return out } -// GetInformer returns an informer for the given object. -func (c *GVKRoutedCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return nil, errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.GetInformer(ctx, obj, opts...) +// Get retrieves an obj for the given object key from the Kubernetes Cluster. +// obj must be a struct pointer so that obj can be updated with the response +// returned by the Server. +// +// Getting an object marks the informer for the object's GVK active. +func (c *InformerTrackingCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if err := c.markActive(obj); err != nil { + return err } - return c.fallback.GetInformer(ctx, obj, opts...) + return c.Cache.Get(ctx, key, obj, opts...) } -// GetInformerForKind returns an informer for the given GVK. -func (c *GVKRoutedCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.GetInformerForKind(ctx, gvk, opts...) +// List retrieves list of objects for a given namespace and list options. On a +// successful call, Items field in the list will be populated with the result +// returned from the server. 
+// +// Listing objects marks the informer for the object's GVK active. +func (c *InformerTrackingCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if err := c.markActive(list); err != nil { + return err } - return c.fallback.GetInformerForKind(ctx, gvk, opts...) + return c.Cache.List(ctx, list, opts...) } -// RemoveInformer removes an informer entry and stops it if it was running. -func (c *GVKRoutedCache) RemoveInformer(ctx context.Context, obj client.Object) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) - } - - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.RemoveInformer(ctx, obj) +// GetInformer fetches or constructs an informer for the given object that +// corresponds to a single API kind and resource. +// +// Getting an informer for an object marks the informer as active. +func (c *InformerTrackingCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + if err := c.markActive(obj); err != nil { + return nil, err } - - return c.fallback.RemoveInformer(ctx, obj) + return c.Cache.GetInformer(ctx, obj, opts...) } -// Start for a GVKRoutedCache is a no-op. Start must be called for each delegate. -func (c *GVKRoutedCache) Start(_ context.Context) error { - return nil -} +// GetInformerForKind is similar to GetInformer, except that it takes a +// group-version-kind, instead of the underlying object. +// +// Getting an informer marks the informer as active. 
+func (c *InformerTrackingCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion(gvk.GroupVersion().String()) + obj.SetKind(gvk.Kind) -// WaitForCacheSync for a GVKRoutedCache waits for all delegates and the -// fallback to sync, and returns false if any of them fails to sync. -func (c *GVKRoutedCache) WaitForCacheSync(ctx context.Context) bool { - c.lock.RLock() - syncedCh := make(chan bool, len(c.delegates)+1) - cas := make([]cache.Cache, 0, len(c.delegates)) - for _, ca := range c.delegates { - cas = append(cas, ca) + if err := c.markActive(obj); err != nil { + return nil, err } - cas = append(cas, c.fallback) - c.lock.RUnlock() - - var wg sync.WaitGroup - ctx, cancelFn := context.WithCancel(ctx) - - for _, ca := range cas { - wg.Add(1) - go func(ca cache.Cache) { - defer wg.Done() - synced := ca.WaitForCacheSync(ctx) - if !synced { - // first unsynced cache breaks the whole wait - cancelFn() - } - syncedCh <- synced - }(ca) - } - - wg.Wait() - close(syncedCh) - cancelFn() + return c.Cache.GetInformerForKind(ctx, gvk, opts...) +} - // any not synced? - for synced := range syncedCh { - if !synced { - return false - } +// RemoveInformer removes an informer entry and stops it if it was running. +// +// Removing an informer marks the informer as inactive. +func (c *InformerTrackingCache) RemoveInformer(ctx context.Context, obj client.Object) error { + if err := c.markInactive(obj); err != nil { + return err } - return c.fallback.WaitForCacheSync(ctx) + return c.Cache.RemoveInformer(ctx, obj) } -// IndexField adds an index with the given field name on the given object type -// by using the given function to extract the value for that field. 
-func (c *GVKRoutedCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { +func (c *InformerTrackingCache) markActive(obj runtime.Object) error { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") } - c.lock.RLock() - delegate, ok := c.delegates[gvk] - c.lock.RUnlock() - - if ok { - return delegate.IndexField(ctx, obj, field, extractValue) + if _, ok := obj.(metav1.ListInterface); ok { + // Bit of a hack, but it's what controller-runtime does. + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") } - return c.fallback.IndexField(ctx, obj, field, extractValue) -} - -// cachedRoutedClient wraps a client and routes read requests by GVK to a cache. -type cachedRoutedClient struct { - client.Client - - scheme *runtime.Scheme - cache *GVKRoutedCache -} + c.mx.RLock() + _, active := c.active[gvk] + c.mx.RUnlock() -func (c *cachedRoutedClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", obj, err) + // This isn't so bad. The default controller-runtime cache.Cache + // implementation takes a lock when starting an informer anyway. + if !active { + c.mx.Lock() + c.active[gvk] = true + c.mx.Unlock() } - c.cache.lock.RLock() - delegate, ok := c.cache.delegates[gvk] - c.cache.lock.RUnlock() - - if ok { - return delegate.Get(ctx, key, obj, opts...) - } - - return c.Client.Get(ctx, key, obj, opts...) 
+ return nil } -func (c *cachedRoutedClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - gvk, err := apiutil.GVKForObject(list, c.scheme) +func (c *InformerTrackingCache) markInactive(obj runtime.Object) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { - return errors.Errorf("failed to get GVK for type %T: %w", list, err) - } - - if !strings.HasSuffix(gvk.Kind, "List") { - // following controller-runtime here which does not support non - // List types. - return errors.Errorf("non-list type %T (kind %q) passed as output", list, gvk) + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") } - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - c.cache.lock.RLock() - delegate, ok := c.cache.delegates[gvk] - c.cache.lock.RUnlock() - if ok { - return delegate.List(ctx, list, opts...) + if _, ok := obj.(metav1.ListInterface); ok { + // Bit of a hack, but it's what controller-runtime does. + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") } - return c.Client.List(ctx, list, opts...) -} + c.mx.RLock() + _, active := c.active[gvk] + c.mx.RUnlock() -// WithGVKRoutedCache returns a manager backed by a GVKRoutedCache. The client -// returned by the manager will route read requests to cached GVKs. -func WithGVKRoutedCache(c *GVKRoutedCache, mgr controllerruntime.Manager) controllerruntime.Manager { - return &routedManager{ - Manager: mgr, - client: &cachedRoutedClient{ - Client: mgr.GetClient(), - scheme: mgr.GetScheme(), - cache: c, - }, - cache: c, + // This isn't so bad. The default controller-runtime cache.Cache + // implementation takes a lock when stopping an informer anyway. 
+ if active { + c.mx.Lock() + delete(c.active, gvk) + c.mx.Unlock() } -} - -type routedManager struct { - controllerruntime.Manager - - client client.Client - cache cache.Cache -} - -func (m *routedManager) GetClient() client.Client { - return m.client -} -func (m *routedManager) GetCache() cache.Cache { - return m.cache + return nil } diff --git a/internal/controller/engine/engine.go b/internal/controller/engine/engine.go index 552aeb1a45a..74956d942a3 100644 --- a/internal/controller/engine/engine.go +++ b/internal/controller/engine/engine.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Crossplane Authors. +Copyright 2024 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,148 +14,231 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package engine provides utilties for working with controllers. +// Package engine manages the lifecycle of a set of controllers. package engine import ( "context" "sync" + "time" - "k8s.io/client-go/rest" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/controller" + kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" ) -// Error strings. 
-const ( - errCreateCache = "cannot create new cache" - errCreateController = "cannot create new controller" - errCrashCache = "cache error" - errCrashController = "controller error" - errWatch = "cannot setup watch" -) +// A ControllerEngine manages a set of controllers that can be dynamically +// started and stopped. It also manages a dynamic set of watches per controller, +// and the informers that back them. +type ControllerEngine struct { + // The manager of this engine's controllers. + mgr manager.Manager -// A NewCacheFn creates a new controller-runtime cache. -type NewCacheFn func(cfg *rest.Config, o cache.Options) (cache.Cache, error) + // The engine must have exclusive use of these informers. All controllers + // managed by the engine should use these informers. + infs TrackingInformers -// A NewControllerFn creates a new controller-runtime controller. -type NewControllerFn func(name string, m manager.Manager, o controller.Options) (controller.Controller, error) + log logging.Logger -// The default new cache and new controller functions. -// -//nolint:gochecknoglobals // We treat these as constants. -var ( - DefaultNewCacheFn NewCacheFn = cache.New - DefaultNewControllerFn NewControllerFn = controller.NewUnmanaged -) + // Protects everything below. + mx sync.RWMutex -// An ControllerEngine manages the lifecycles of controller-runtime controllers -// (and their caches). The lifecycles of the controllers are not coupled to -// lifecycle of the engine, nor to the lifecycle of the controller manager it -// uses. -type ControllerEngine struct { - mgr manager.Manager + // Running controllers, by name. + controllers map[string]*controller +} - started map[string]context.CancelFunc - errors map[string]error - mx sync.RWMutex +// TrackingInformers is a set of Informers. It tracks which are active. +type TrackingInformers interface { + cache.Informers + ActiveInformers() []schema.GroupVersionKind +} + +// New creates a new controller engine. 
+func New(mgr manager.Manager, infs TrackingInformers, o ...ControllerEngineOption) *ControllerEngine { + e := &ControllerEngine{ + mgr: mgr, + infs: infs, + log: logging.NewNopLogger(), + controllers: make(map[string]*controller), + } - newCache NewCacheFn - newCtrl NewControllerFn + for _, fn := range o { + fn(e) + } + + return e } -// An ControllerEngineOption configures a ControllerEngine. +// An ControllerEngineOption configures a controller engine. type ControllerEngineOption func(*ControllerEngine) -// WithNewCacheFn may be used to configure a different cache implementation. -// DefaultNewCacheFn is used by default. -func WithNewCacheFn(fn NewCacheFn) ControllerEngineOption { +// WithLogger configures an Engine to use a logger. +func WithLogger(l logging.Logger) ControllerEngineOption { return func(e *ControllerEngine) { - e.newCache = fn + e.log = l } } -// WithNewControllerFn may be used to configure a different controller -// implementation. DefaultNewControllerFn is used by default. -func WithNewControllerFn(fn NewControllerFn) ControllerEngineOption { - return func(e *ControllerEngine) { - e.newCtrl = fn - } +type controller struct { + // The running controller. + ctrl kcontroller.Controller + + // Called to stop the controller. + cancel context.CancelFunc + + // Protects the below map. + mx sync.RWMutex + + // The controller's sources, by watched GVK. + sources map[schema.GroupVersionKind]*StoppableSource } -// New produces a new ControllerEngine. -func New(mgr manager.Manager, o ...ControllerEngineOption) *ControllerEngine { - e := &ControllerEngine{ - mgr: mgr, +// A WatchGarbageCollector periodically garbage collects watches. +type WatchGarbageCollector interface { + GarbageCollectWatches(ctx context.Context, interval time.Duration) +} - started: make(map[string]context.CancelFunc), - errors: make(map[string]error), +// ControllerOptions configure a controller. 
+type ControllerOptions struct { + runtime kcontroller.Options + gc WatchGarbageCollector +} - newCache: DefaultNewCacheFn, - newCtrl: DefaultNewControllerFn, - } +// A ControllerOption configures a controller. +type ControllerOption func(o *ControllerOptions) - for _, eo := range o { - eo(e) +// WithRuntimeOptions configures the underlying controller-runtime controller. +func WithRuntimeOptions(ko kcontroller.Options) ControllerOption { + return func(o *ControllerOptions) { + o.runtime = ko } +} - return e +// WithWatchGarbageCollector specifies an optional garbage collector this +// controller should use to remove unused watches. +func WithWatchGarbageCollector(gc WatchGarbageCollector) ControllerOption { + return func(o *ControllerOptions) { + o.gc = gc + } } -// IsRunning indicates whether the named controller is running - i.e. whether it -// has been started and does not appear to have crashed. -func (e *ControllerEngine) IsRunning(name string) bool { +// Start a new controller. +func (e *ControllerEngine) Start(name string, o ...ControllerOption) error { + co := &ControllerOptions{} + for _, fn := range o { + fn(co) + } + + // Start is a no-op if the controller is already running. e.mx.RLock() - defer e.mx.RUnlock() + _, ok := e.controllers[name] + e.mx.RUnlock() + if ok { + return nil + } - _, running := e.started[name] - return running -} + c, err := kcontroller.NewUnmanaged(name, e.mgr, co.runtime) + if err != nil { + return errors.Wrap(err, "cannot create new controller") + } -// Err returns any error encountered by the named controller. The returned error -// is always nil if the named controller is running. -func (e *ControllerEngine) Err(name string) error { - e.mx.RLock() - defer e.mx.RUnlock() + // The caller will usually be a reconcile method. We want the controller + // to keep running when the reconcile ends, so we create a new context + // instead of taking one as an argument. 
+ ctx, cancel := context.WithCancel(context.Background()) - return e.errors[name] -} + go func() { + // Don't start the controller until the manager is elected. + <-e.mgr.Elected() -// Stop the named controller. -func (e *ControllerEngine) Stop(name string) { - e.done(name, nil) -} + e.log.Debug("Starting new controller", "controller", name) + + // Run the controller until its context is cancelled. + if err := c.Start(ctx); err != nil { + e.log.Info("Controller returned an error", "name", name, "error", err) + } + + e.log.Debug("Stopped controller", "controller", name) + }() + + if co.gc != nil { + go func() { + // Don't start the garbage collector until the manager is elected. + <-e.mgr.Elected() + + e.log.Debug("Starting watch garbage collector for controller", "controller", name) + + // Run the collector every minute until its context is cancelled. + co.gc.GarbageCollectWatches(ctx, 1*time.Minute) + + e.log.Debug("Stopped watch garbage collector for controller", "controller", name) + }() + } + + r := &controller{ + ctrl: c, + cancel: cancel, + sources: make(map[schema.GroupVersionKind]*StoppableSource), + } -func (e *ControllerEngine) done(name string, err error) { e.mx.Lock() - defer e.mx.Unlock() + e.controllers[name] = r + e.mx.Unlock() - stop, ok := e.started[name] - if ok { - stop() - delete(e.started, name) + return nil +} + +// Stop a controller. +func (e *ControllerEngine) Stop(ctx context.Context, name string) error { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + // Stop is a no-op if the controller isn't running. + if !running { + return nil + } + + // First stop the controller's watches. + if err := e.StopWatches(ctx, name); err != nil { + return errors.Wrapf(err, "cannot stop watches for controller %q", name) } - // Don't overwrite the first error if done is called multiple times. - if e.errors[name] != nil { - return + // Then stop any informers that only this controller was using. 
+ if err := e.RemoveUnwatchedInformers(ctx); err != nil { + return errors.Wrapf(err, "cannot remove unwatched informers after stopping watches for controller %q", name) } - e.errors[name] = err + + // Finally, stop and delete the controller. + e.mx.Lock() + c.cancel() + delete(e.controllers, name) + e.mx.Unlock() + + e.log.Debug("Stopped controller", "controller", name) + return nil +} + +// IsRunning returns true if the named controller is running. +func (e *ControllerEngine) IsRunning(name string) bool { + e.mx.RLock() + _, running := e.controllers[name] + e.mx.RUnlock() + return running } // Watch an object. type Watch struct { - // A watch is either a customSource, or a kind, handler, and predicates. - customSource source.Source - kind client.Object handler handler.EventHandler predicates []predicate.Predicate @@ -168,124 +251,161 @@ func WatchFor(kind client.Object, h handler.EventHandler, p ...predicate.Predica return Watch{kind: kind, handler: h, predicates: p} } -// WatchTriggeredBy returns a custom watch for secondary resources triggering -// the controller. source.Kind can be used to create a source for a secondary -// cache. Events will be handled by the supplied EventHandler, and may be -// filtered by the supplied predicates. -func WatchTriggeredBy(source source.Source) Watch { - return Watch{customSource: source} -} +// StartWatches instructs the named controller to start the supplied watches. +// The controller will only start a watch if it's not already watching the type +// of object specified by the supplied Watch. StartWatches blocks other +// operations on the same controller if and when it starts a watch. +func (e *ControllerEngine) StartWatches(name string, ws ...Watch) error { + e.mx.RLock() + c, ok := e.controllers[name] + e.mx.RUnlock() -// Start the named controller. Each controller is started with its own cache -// whose lifecycle is coupled to the controller. 
The controller is started with
-// the supplied options, and configured with the supplied watches. Start does
-// not block.
-func (e *ControllerEngine) Start(name string, o controller.Options, w ...Watch) error {
-	c, err := e.Create(name, o, w...)
-	if err != nil {
-		return err
+	if !ok {
+		return errors.Errorf("controller %q is not running", name)
 	}
-	return c.Start(context.Background())
-}
 
-// NamedController is a controller that's not yet started. It gives access to
-// the underlying cache, which may be used e.g. to add indexes.
-type NamedController interface {
-	Start(ctx context.Context) error
-	GetCache() cache.Cache
-}
+	watchExists := make(map[schema.GroupVersionKind]bool)
+	c.mx.RLock()
+	for gvk := range c.sources {
+		watchExists[gvk] = true
+	}
+	c.mx.RUnlock()
+
+	// It's possible that we didn't explicitly stop a watch, but its backing
+	// informer was removed. This implicitly stops the watch by deleting its
+	// backing listener. If a watch exists but doesn't have an active informer,
+	// we want to restart the watch (and, implicitly, the informer).
+	a := e.infs.ActiveInformers()
+	activeInformer := make(map[schema.GroupVersionKind]bool, len(a))
+	for _, gvk := range a {
+		activeInformer[gvk] = true
+	}
 
-type namedController struct {
-	name string
-	e    *ControllerEngine
-	ca   cache.Cache
-	ctrl controller.Controller
-}
+	// Using a map here deduplicates watches by GVK. If we're asked to start
+	// several watches for the same GVK in the same call, we'll only start
+	// the last one.
+	start := make(map[schema.GroupVersionKind]Watch, len(ws))
+	for _, w := range ws {
+		gvk, err := apiutil.GVKForObject(w.kind, e.mgr.GetScheme())
+		if err != nil {
+			return errors.Wrapf(err, "cannot determine group, version, and kind for %T", w.kind)
+		}
 
-// Create the named controller. Each controller gets its own cache
-// whose lifecycle is coupled to the controller.
The controller is created with -// the supplied options, and configured with the supplied watches. It is not -// started yet. -func (e *ControllerEngine) Create(name string, o controller.Options, w ...Watch) (NamedController, error) { - // Each controller gets its own cache for the GVKs it owns. This cache is - // wrapped by a GVKRoutedCache that routes requests to other GVKs to the - // manager's cache. This way we can share informers for composed resources - // (that's where this is primarily used) with other controllers, but get - // control about the lifecycle of the owned GVKs' informers. - ca, err := e.newCache(e.mgr.GetConfig(), cache.Options{Scheme: e.mgr.GetScheme(), Mapper: e.mgr.GetRESTMapper()}) - if err != nil { - return nil, errors.Wrap(err, errCreateCache) - } + // We've already created this watch and the informer backing it is still + // running. We don't need to create a new watch. + if watchExists[gvk] && activeInformer[gvk] { + e.log.Debug("Watch exists for GVK, not starting a new one", "controller", name, "watched-gvk", gvk) + continue + } - // Wrap the existing manager to use our cache for the GVKs of this controller. - rc := NewGVKRoutedCache(e.mgr.GetScheme(), e.mgr.GetCache()) - rm := &routedManager{ - Manager: e.mgr, - client: &cachedRoutedClient{ - Client: e.mgr.GetClient(), - scheme: e.mgr.GetScheme(), - cache: rc, - }, - cache: rc, + start[gvk] = w } - ctrl, err := e.newCtrl(name, rm, o) - if err != nil { - return nil, errors.Wrap(err, errCreateController) + // Don't take any write locks if there's no watches to start. + if len(start) == 0 { + return nil } - for _, wt := range w { - if wt.customSource != nil { - if err := ctrl.Watch(wt.customSource); err != nil { - return nil, errors.Wrap(err, errWatch) - } - continue + // TODO(negz): If this blocks too much, we could alleviate it a bit by + // reading watches to start from a buffered channel. + + // Take the write lock. 
This will block any other callers that want to + // update the watches for the same controller. + c.mx.Lock() + defer c.mx.Unlock() + + // Start new sources. + for gvk, w := range start { + // The controller's Watch method just calls the StoppableSource's Start + // method, passing in its private work queue as an argument. This + // will start an informer for the watched kind if there isn't one + // running already. + // + // The watch will stop sending events when either the source is stopped, + // or its backing informer is stopped. The controller's work queue will + // stop processing events when the controller is stopped. + src := NewStoppableSource(e.infs, w.kind, w.handler, w.predicates...) + if err := c.ctrl.Watch(src); err != nil { + return errors.Wrapf(err, "cannot start watch for %q", gvk) } - // route cache and client (read) requests to our cache for this GVK. - gvk, err := apiutil.GVKForObject(wt.kind, e.mgr.GetScheme()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get GVK for type %T", wt.kind) - } - rc.AddDelegate(gvk, ca) + // Record that we're now running this source. + c.sources[gvk] = src - if err := ctrl.Watch(source.Kind(ca, wt.kind, wt.handler, wt.predicates...)); err != nil { - return nil, errors.Wrap(err, errWatch) - } + e.log.Debug("Started watching GVK", "controller", name, "watched-gvk", gvk) } - return &namedController{name: name, e: e, ca: ca, ctrl: ctrl}, nil + return nil } -// Start the named controller. Start does not block. -func (c *namedController) Start(ctx context.Context) error { - if c.e.IsRunning(c.name) { +// StopWatches stops all watches for the supplied controller, except watches for +// the GVKs supplied by the keep argument. StopWatches blocks other operations +// on the same controller if and when it stops a watch. 
+func (e *ControllerEngine) StopWatches(ctx context.Context, name string, keep ...schema.GroupVersionKind) error { + e.mx.RLock() + c, ok := e.controllers[name] + e.mx.RUnlock() + + if !ok { + return errors.Errorf("controller %q is not running", name) + } + + c.mx.RLock() + stop := sets.KeySet(c.sources).Difference(sets.New(keep...)) + c.mx.RUnlock() + + // Don't take the write lock if we actually want to keep all watches. + if len(stop) == 0 { return nil } - ctx, stop := context.WithCancel(ctx) - c.e.mx.Lock() - c.e.started[c.name] = stop - c.e.errors[c.name] = nil - c.e.mx.Unlock() + c.mx.Lock() + defer c.mx.Unlock() - go func() { - <-c.e.mgr.Elected() - c.e.done(c.name, errors.Wrap(c.ca.Start(ctx), errCrashCache)) - }() - go func() { - <-c.e.mgr.Elected() - if synced := c.ca.WaitForCacheSync(ctx); !synced { - c.e.done(c.name, errors.New(errCrashCache)) - return + for gvk := range stop { + if err := c.sources[gvk].Stop(ctx); err != nil { + return errors.Wrapf(err, "cannot stop watch for %q", gvk) } - c.e.done(c.name, errors.Wrap(c.ctrl.Start(ctx), errCrashController)) - }() + delete(c.sources, gvk) + e.log.Debug("Stopped watching GVK", "controller", name, "watched-gvk", gvk) + } return nil } -// GetCache returns the cache used by the named controller. -func (c *namedController) GetCache() cache.Cache { - return c.ca +// RemoveUnwatchedInformers removes all informers that don't power any +// controller's watches. This may include informers that weren't started by a +// watch, e.g. informers that were started by a Get or List call to a cache +// backed by the same informers as the Engine. +func (e *ControllerEngine) RemoveUnwatchedInformers(ctx context.Context) error { + // Build the set of GVKs any controller is currently watching. 
+ e.mx.RLock() + watching := make(map[schema.GroupVersionKind]bool, len(e.controllers)) + for _, c := range e.controllers { + c.mx.RLock() + for gvk := range c.sources { + watching[gvk] = true + } + c.mx.RUnlock() + } + e.mx.RUnlock() + + for _, gvk := range e.infs.ActiveInformers() { + // A controller is still watching this GVK. Don't remove its informer. + if watching[gvk] { + e.log.Debug("Not removing informer. At least one controller is still watching its GVK.", "watched-gvk", gvk) + continue + } + + // RemoveInformer only uses the supplied object to get its GVK. + u := &unstructured.Unstructured{} + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) + if err := e.infs.RemoveInformer(ctx, u); err != nil { + return errors.Wrapf(err, "cannot remove informer for %q", gvk) + } + e.log.Debug("Removed informer. No controller watches its GVK.", "watched-gvk", gvk) + } + + return nil } diff --git a/internal/controller/engine/engine_test.go b/internal/controller/engine/engine_test.go index 540ddee6e45..45735399773 100644 --- a/internal/controller/engine/engine_test.go +++ b/internal/controller/engine/engine_test.go @@ -22,16 +22,9 @@ import ( "time" "github.com/google/go-cmp/cmp" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" ) @@ -41,170 +34,34 @@ type MockCache struct { MockStart func(stop context.Context) error } -func (c *MockCache) Start(stop context.Context) error { - return c.MockStart(stop) -} - -func (c *MockCache) WaitForCacheSync(_ context.Context) bool { - return true -} - -type MockController struct { - controller.Controller 
- - MockStart func(stop context.Context) error - MockWatch func(s source.Source) error -} - -func (c *MockController) Start(stop context.Context) error { - return c.MockStart(stop) -} - -func (c *MockController) Watch(s source.Source) error { - return c.MockWatch(s) -} - -func TestEngine(t *testing.T) { - errBoom := errors.New("boom") - +func TestStartStopController(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + opts []ControllerEngineOption + } type args struct { name string - o controller.Options - w []Watch + opts []ControllerOption } type want struct { - err error - crash error + startErr error + stopErr error } cases := map[string]struct { reason string - e *ControllerEngine + params params args args want want }{ - "NewCacheError": { - reason: "Errors creating a new cache should be returned", - e: New(&fake.Manager{}, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, errBoom }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - err: errors.Wrap(errBoom, errCreateCache), - }, - }, - "NewControllerError": { - reason: "Errors creating a new controller should be returned", - e: New( - &fake.Manager{ - Scheme: runtime.NewScheme(), - Cache: &MockCache{}, - }, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { return nil, errBoom }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - err: errors.Wrap(errBoom, errCreateController), - }, - }, - "WatchError": { - reason: "Errors adding a watch should be returned", - e: New( - &fake.Manager{ - Scheme: runtime.NewScheme(), - Cache: &MockCache{}, - }, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := 
&MockController{MockWatch: func(source.Source) error { return errBoom }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - w: []Watch{WatchFor(&unstructured.Unstructured{ - Object: map[string]interface{}{"apiVersion": "example.org/v1", "kind": "Thing"}, - }, nil)}, - }, - want: want{ - err: errors.Wrap(errBoom, errWatch), - }, - }, - "SchemeError": { - reason: "Passing an object of unknown GVK", - e: New( - &fake.Manager{ - Scheme: runtime.NewScheme(), - Cache: &MockCache{}, - }, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockWatch: func(source.Source) error { return errBoom }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - w: []Watch{WatchFor(&unstructured.Unstructured{}, nil)}, - }, - want: want{ - err: errors.Wrap(runtime.NewMissingKindErr("unstructured object has no kind"), "failed to get GVK for type *unstructured.Unstructured"), - }, - }, - "CacheCrashError": { - reason: "Errors starting or running a cache should be returned", - e: New(&fake.Manager{}, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { - c := &MockCache{MockStart: func(_ context.Context) error { return errBoom }} - return c, nil - }), - WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockStart: func(_ context.Context) error { - return nil - }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - crash: errors.Wrap(errBoom, errCrashCache), - }, - }, - "ControllerCrashError": { - reason: "Errors starting or running a controller should be returned", - e: New(&fake.Manager{}, - WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { - c := &MockCache{MockStart: func(_ context.Context) error { - return nil - }} - return c, nil - }), 
- WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { - c := &MockController{MockStart: func(_ context.Context) error { - return errBoom - }} - return c, nil - }), - ), - args: args{ - name: "coolcontroller", - }, - want: want{ - crash: errors.Wrap(errBoom, errCrashController), - }, - }, + // TODO(negz): Write tests. } for name, tc := range cases { t.Run(name, func(t *testing.T) { - err := tc.e.Start(tc.args.name, tc.args.o, tc.args.w...) - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + e := New(tc.params.mgr, tc.params.infs, tc.params.opts...) + err := e.Start(tc.args.name, tc.args.opts...) + if diff := cmp.Diff(tc.want.startErr, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) } @@ -212,9 +69,11 @@ func TestEngine(t *testing.T) { // becomes flaky or time consuming we could use a ticker instead. time.Sleep(100 * time.Millisecond) - tc.e.Stop(tc.args.name) - if diff := cmp.Diff(tc.want.crash, tc.e.Err(tc.args.name), test.EquateErrors()); diff != "" { - t.Errorf("\n%s\ne.Err(...): -want error, +got error:\n%s", tc.reason, diff) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + err = e.Stop(ctx, tc.args.name) + if diff := cmp.Diff(tc.want.stopErr, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) } }) } diff --git a/internal/controller/engine/source.go b/internal/controller/engine/source.go new file mode 100644 index 00000000000..8f8d3ede90b --- /dev/null +++ b/internal/controller/engine/source.go @@ -0,0 +1,189 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + + kcache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +var _ source.Source = &StoppableSource{} + +// NewStoppableSource returns a new watch source that can be stopped. +func NewStoppableSource(infs cache.Informers, t client.Object, h handler.EventHandler, ps ...predicate.Predicate) *StoppableSource { + return &StoppableSource{infs: infs, Type: t, handler: h, predicates: ps} +} + +// A StoppableSource is a controller-runtime watch source that can be stopped. +type StoppableSource struct { + infs cache.Informers + + Type client.Object + handler handler.EventHandler + predicates []predicate.Predicate + + reg kcache.ResourceEventHandlerRegistration +} + +// Start is internal and should be called only by the Controller to register +// an EventHandler with the Informer to enqueue reconcile.Requests. 
+func (s *StoppableSource) Start(ctx context.Context, q workqueue.RateLimitingInterface) error { + i, err := s.infs.GetInformer(ctx, s.Type, cache.BlockUntilSynced(true)) + if err != nil { + return errors.Wrapf(err, "cannot get informer for %T", s.Type) + } + + reg, err := i.AddEventHandler(NewEventHandler(ctx, q, s.handler, s.predicates...).HandlerFuncs()) + if err != nil { + return errors.Wrapf(err, "cannot add event handler") + } + s.reg = reg + + return nil +} + +// Stop removes the EventHandler from the source's Informer. The Informer will +// stop sending events to the source. +func (s *StoppableSource) Stop(ctx context.Context) error { + if s.reg == nil { + return nil + } + + i, err := s.infs.GetInformer(ctx, s.Type) + if err != nil { + return errors.Wrapf(err, "cannot get informer for %T", s.Type) + } + + return errors.Wrap(i.RemoveEventHandler(s.reg), "cannot remove event handler") +} + +// NewEventHandler creates a new EventHandler. +func NewEventHandler(ctx context.Context, q workqueue.RateLimitingInterface, h handler.EventHandler, ps ...predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: h, + queue: q, + predicates: ps, + } +} + +// An EventHandler converts a controller-runtime handler and predicates into a +// client-go ResourceEventHandler. It's a stripped down version of +// controller-runtime's internal TypedEventHandler implementation. +type EventHandler struct { + ctx context.Context //nolint:containedctx // Kept for compatibility with controller-runtime. + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs. +func (e *EventHandler) HandlerFuncs() kcache.ResourceEventHandlerFuncs { + return kcache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. 
+func (e *EventHandler) OnAdd(obj interface{}) { + o, ok := obj.(client.Object) + if !ok { + return + } + + c := event.CreateEvent{Object: o} + for _, p := range e.predicates { + if !p.Create(c) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) +} + +// OnUpdate creates UpdateEvent and calls Update on EventHandler. +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { + o, ok := oldObj.(client.Object) + if !ok { + return + } + + n, ok := newObj.(client.Object) + if !ok { + return + } + + u := event.UpdateEvent{ObjectOld: o, ObjectNew: n} + + for _, p := range e.predicates { + if !p.Update(u) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) +} + +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e *EventHandler) OnDelete(obj interface{}) { + var d event.DeleteEvent + + switch o := obj.(type) { + case client.Object: + d = event.DeleteEvent{Object: o} + + // Deal with tombstone events by pulling the object out. Tombstone events + // wrap the object in a DeleteFinalStateUnknown struct, so the object needs + // to be pulled out. + case kcache.DeletedFinalStateUnknown: + wrapped, ok := o.Obj.(client.Object) + if !ok { + return + } + d = event.DeleteEvent{DeleteStateUnknown: true, Object: wrapped} + + default: + return + } + + for _, p := range e.predicates { + if !p.Delete(d) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) +}