diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 183f99c1e288..f6bc296a91c5 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -108,30 +108,31 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl } b := ctrl.NewControllerManagedBy(mgr). - For(&bootstrapv1.KubeadmConfig{}). + Add(builder.For(mgr, + &bootstrapv1.KubeadmConfig{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &bootstrapv1.KubeadmConfig{}), + )). WithOptions(options). - Watches( + Add(builder.Watches(mgr, &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc), - ).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)) + handler.EnqueueRequestsFromObjectMap(r.MachineToBootstrapMapFunc), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )) if feature.Gates.Enabled(feature.MachinePool) { - b = b.Watches( + b = b.Add(builder.Watches(mgr, &expv1.MachinePool{}, - handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), - ) + handler.EnqueueRequestsFromObjectMap(r.MachinePoolToBootstrapMapFunc), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &expv1.MachinePool{}), + )) } - b = b.Watches( + b = b.Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs), - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), - ), - ), - ) + handler.EnqueueRequestsFromObjectMap(r.ClusterToKubeadmConfigs), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + )) if err := b.Complete(r); err != nil { return errors.Wrap(err, "failed setting up with a controller manager") @@ -875,14 +876,8 @@ func (r *KubeadmConfigReconciler) tokenCheckRefreshOrRotationInterval() time.Dur // ClusterToKubeadmConfigs is a handler.ToRequestsFunc to be used to enqueue // requests for reconciliation of KubeadmConfigs. -func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(ctx context.Context, o client.Object) []ctrl.Request { +func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(ctx context.Context, c *clusterv1.Cluster) []ctrl.Request { result := []ctrl.Request{} - - c, ok := o.(*clusterv1.Cluster) - if !ok { - panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) - } - selectors := []client.ListOption{ client.InNamespace(c.Namespace), client.MatchingLabels{ @@ -923,12 +918,7 @@ func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(ctx context.Context, o // MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue // request for reconciliation of KubeadmConfig. 
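// Illustrative sketch, not part of the applied patch: the hunk below replaces the
// untyped mapper (client.Object plus a type assertion that panics on mismatch)
// with one typed to the watched resource, assuming the handler.ObjectMapFunc /
// handler.EnqueueRequestsFromObjectMap typed helpers from the forked
// controller-runtime pinned in go.mod further down. The mapper name is
// hypothetical; imports match the file above.

// machineToConfig maps a Machine event to the KubeadmConfig it references.
func machineToConfig(_ context.Context, m *clusterv1.Machine) []ctrl.Request {
	// The compiler guarantees m is a *clusterv1.Machine; no assertion or panic needed.
	if m.Spec.Bootstrap.ConfigRef == nil {
		return nil
	}
	return []ctrl.Request{{NamespacedName: client.ObjectKey{
		Namespace: m.Namespace,
		Name:      m.Spec.Bootstrap.ConfigRef.Name,
	}}}
}

// Registered with the typed handler instead of handler.EnqueueRequestsFromMapFunc:
//
//	Add(builder.Watches(mgr, &clusterv1.Machine{},
//		handler.EnqueueRequestsFromObjectMap(machineToConfig),
//	))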
-func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { - m, ok := o.(*clusterv1.Machine) - if !ok { - panic(fmt.Sprintf("Expected a Machine but got a %T", o)) - } - +func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, m *clusterv1.Machine) []ctrl.Request { result := []ctrl.Request{} if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == bootstrapv1.GroupVersion.WithKind("KubeadmConfig") { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} @@ -939,12 +929,7 @@ func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o // MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue // request for reconciliation of KubeadmConfig. -func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { - m, ok := o.(*expv1.MachinePool) - if !ok { - panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) - } - +func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, m *expv1.MachinePool) []ctrl.Request { result := []ctrl.Request{} configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef if configRef != nil && configRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("KubeadmConfig").GroupKind() { diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index c31920c8e3b1..c3a6f6a42b44 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -57,7 +57,7 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { g := NewWithT(t) cluster := builder.Cluster("my-cluster", metav1.NamespaceDefault).Build() objs := []client.Object{cluster} - machineObjs := []client.Object{} + machineObjs := []*clusterv1.Machine{} var expectedConfigName string for i := 0; i < 3; i++ { configName := fmt.Sprintf("my-config-%d", i) diff --git a/controllers/external/tracker.go b/controllers/external/tracker.go index 2dd88120d722..850b877fae61 100644 --- a/controllers/external/tracker.go +++ b/controllers/external/tracker.go @@ -23,8 +23,8 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -42,7 +42,7 @@ type ObjectTracker struct { } // Watch uses the controller to issue a Watch only if the object hasn't been seen before. -func (o *ObjectTracker) Watch(log logr.Logger, obj runtime.Object, handler handler.EventHandler, p ...predicate.Predicate) error { +func (o *ObjectTracker) Watch(log logr.Logger, obj client.Object, handler handler.EventHandler, p ...predicate.Predicate) error { // Consider this a no-op if the controller isn't present. 
if o.Controller == nil { return nil @@ -59,9 +59,7 @@ func (o *ObjectTracker) Watch(log logr.Logger, obj runtime.Object, handler handl log.Info(fmt.Sprintf("Adding watch on external object %q", gvk.String())) err := o.Controller.Watch( - source.Kind(o.Cache, u), - handler, - append(p, predicates.ResourceNotPaused(log))..., + source.Kind(o.Cache, u).Prepare(handler, append(p, predicates.ResourceNotPaused(log, obj))...), ) if err != nil { o.m.Delete(key) diff --git a/controllers/external/tracker_test.go b/controllers/external/tracker_test.go index fd4c7d2dd9cf..553edb220513 100644 --- a/controllers/external/tracker_test.go +++ b/controllers/external/tracker_test.go @@ -24,9 +24,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -55,7 +53,7 @@ func newWatchCountController(raiseError bool) *watchCountController { } } -func (c *watchCountController) Watch(_ source.Source, _ handler.EventHandler, _ ...predicate.Predicate) error { +func (c *watchCountController) Watch(_ source.Source) error { c.count++ if c.raiseError { return errors.New("injected failure") diff --git a/controllers/remote/cluster_cache_reconciler.go b/controllers/remote/cluster_cache_reconciler.go index 748778c63021..2ae946ecd2ff 100644 --- a/controllers/remote/cluster_cache_reconciler.go +++ b/controllers/remote/cluster_cache_reconciler.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -43,9 +44,10 @@ type ClusterCacheReconciler struct { func (r *ClusterCacheReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). Named("remote/clustercache"). - For(&clusterv1.Cluster{}). + Add(builder.For(mgr, &clusterv1.Cluster{}, + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
Complete(r) if err != nil { diff --git a/controllers/remote/cluster_cache_tracker_test.go b/controllers/remote/cluster_cache_tracker_test.go index adbf30c45ebe..77eb0baf2937 100644 --- a/controllers/remote/cluster_cache_tracker_test.go +++ b/controllers/remote/cluster_cache_tracker_test.go @@ -30,6 +30,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -84,8 +85,10 @@ func TestClusterCacheTracker(t *testing.T) { c = &testController{ ch: make(chan string), } - w, err = ctrl.NewControllerManagedBy(mgr).For(&clusterv1.MachineDeployment{}).Build(c) + + watch, err := ctrl.NewControllerManagedBy(mgr).For(&clusterv1.MachineDeployment{}).Build(c) g.Expect(err).ToNot(HaveOccurred()) + w = &controller.ControllerAdapter{Controller: watch} mgrContext, mgrCancel = context.WithCancel(ctx) t.Log("Starting the manager") diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 22ba4f0ae2a1..5b66c49ccb3f 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -91,20 +91,22 @@ type KubeadmControlPlaneReconciler struct { func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { c, err := ctrl.NewControllerManagedBy(mgr). - For(&controlplanev1.KubeadmControlPlane{}). - Owns(&clusterv1.Machine{}). + Add(builder.For(mgr, + &controlplanev1.KubeadmControlPlane{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &controlplanev1.KubeadmControlPlane{}), + )). + Add(builder.Owns(mgr, + &controlplanev1.KubeadmControlPlane{}, + &clusterv1.Machine{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane), - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - ), - ), - ).Build(r) + handler.EnqueueRequestsFromObjectMap(r.ClusterToKubeadmControlPlane), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + )).Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -574,12 +576,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con // ClusterToKubeadmControlPlane is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for KubeadmControlPlane based on updates to a Cluster. 
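// Illustrative sketch of the wiring pattern applied above and throughout this diff,
// assuming the Add/builder.For/builder.Owns/builder.Watches helpers and the
// object-aware cluster-api predicates from the pinned controller-runtime fork.
// ExampleReconciler and its clusterToMachineSets mapper are hypothetical; imports
// match the controllers above.
func (r *ExampleReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Primary resource; the pause/label filter is scoped to this watch only.
		Add(builder.For(mgr, &clusterv1.MachineSet{},
			predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineSet{}),
		)).
		// Owned objects: builder.Owns takes the owner type and the owned type.
		Add(builder.Owns(mgr, &clusterv1.MachineSet{}, &clusterv1.Machine{},
			predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}),
		)).
		// Mapped watch with a typed handler; predicates now ride along per watch
		// instead of a controller-wide WithEventFilter.
		Add(builder.Watches(mgr, &clusterv1.Cluster{},
			handler.EnqueueRequestsFromObjectMap(r.clusterToMachineSets),
			predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)),
		)).
		WithOptions(options).
		Complete(r)
}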
-func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.Context, o client.Object) []ctrl.Request { - c, ok := o.(*clusterv1.Cluster) - if !ok { - panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) - } - +func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(_ context.Context, c *clusterv1.Cluster) []ctrl.Request { controlPlaneRef := c.Spec.ControlPlaneRef if controlPlaneRef != nil && controlPlaneRef.Kind == kubeadmControlPlaneKind { return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} diff --git a/controlplane/kubeadm/internal/webhooks/scale.go b/controlplane/kubeadm/internal/webhooks/scale.go index 6ae6562873f0..4e90b23ac3fd 100644 --- a/controlplane/kubeadm/internal/webhooks/scale.go +++ b/controlplane/kubeadm/internal/webhooks/scale.go @@ -45,7 +45,7 @@ func (v *ScaleValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { // ScaleValidator validates KCP for replicas. type ScaleValidator struct { Client client.Reader - decoder *admission.Decoder + decoder admission.Decoder } // Handle will validate for number of replicas. diff --git a/exp/addons/internal/controllers/clusterresourceset_controller.go b/exp/addons/internal/controllers/clusterresourceset_controller.go index a9febf3242f9..7e944fbd655b 100644 --- a/exp/addons/internal/controllers/clusterresourceset_controller.go +++ b/exp/addons/internal/controllers/clusterresourceset_controller.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "fmt" "time" "github.com/pkg/errors" @@ -67,11 +66,14 @@ type ClusterResourceSetReconciler struct { func (r *ClusterResourceSetReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). - For(&addonsv1.ClusterResourceSet{}). - Watches( + Add(builder.For(mgr, &addonsv1.ClusterResourceSet{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &addonsv1.ClusterResourceSet{}), + )). + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(r.clusterToClusterResourceSet), - ). + handler.EnqueueRequestsFromObjectMap(r.clusterToClusterResourceSet), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )). WatchesMetadata( &corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.resourceToClusterResourceSet), @@ -87,7 +89,6 @@ func (r *ClusterResourceSetReconciler) SetupWithManager(ctx context.Context, mgr ), ). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") @@ -434,14 +435,8 @@ func (r *ClusterResourceSetReconciler) ensureResourceOwnerRef(ctx context.Contex } // clusterToClusterResourceSet is mapper function that maps clusters to ClusterResourceSet. 
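// The ScaleValidator hunk above (and the MachinePool webhook further down) changes
// the decoder field from *admission.Decoder to the admission.Decoder interface.
// A minimal sketch of a handler written against the interface form; the validator
// type and decoded object are hypothetical; Decode keeps the same call shape;
// net/http is assumed for the status code.

type exampleValidator struct {
	decoder admission.Decoder
}

func (v *exampleValidator) Handle(_ context.Context, req admission.Request) admission.Response {
	m := &clusterv1.Machine{}
	if err := v.decoder.Decode(req, m); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	// Validation logic would go here.
	return admission.Allowed("")
}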
-func (r *ClusterResourceSetReconciler) clusterToClusterResourceSet(ctx context.Context, o client.Object) []ctrl.Request { +func (r *ClusterResourceSetReconciler) clusterToClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster) []ctrl.Request { result := []ctrl.Request{} - - cluster, ok := o.(*clusterv1.Cluster) - if !ok { - panic(fmt.Sprintf("Expected a Cluster but got a %T", o)) - } - resourceList := &addonsv1.ClusterResourceSetList{} if err := r.Client.List(ctx, resourceList, client.InNamespace(cluster.Namespace)); err != nil { return nil diff --git a/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go b/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go index 93ac69079ca9..3bbfde969657 100644 --- a/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go +++ b/exp/addons/internal/controllers/clusterresourcesetbinding_controller.go @@ -23,6 +23,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -50,12 +51,12 @@ type ClusterResourceSetBindingReconciler struct { func (r *ClusterResourceSetBindingReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). For(&addonsv1.ClusterResourceSetBinding{}). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(r.clusterToClusterResourceSetBinding), - ). + handler.EnqueueRequestsFromObjectMap(r.clusterToClusterResourceSetBinding), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") @@ -106,7 +107,7 @@ func (r *ClusterResourceSetBindingReconciler) Reconcile(ctx context.Context, req } // clusterToClusterResourceSetBinding is mapper function that maps clusters to ClusterResourceSetBinding. -func (r *ClusterResourceSetBindingReconciler) clusterToClusterResourceSetBinding(_ context.Context, o client.Object) []ctrl.Request { +func (r *ClusterResourceSetBindingReconciler) clusterToClusterResourceSetBinding(_ context.Context, o *clusterv1.Cluster) []ctrl.Request { return []reconcile.Request{ { NamespacedName: client.ObjectKey{ diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index dadaf09a8d8d..9db7630a89d3 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "fmt" "time" "github.com/pkg/errors" @@ -88,20 +87,17 @@ func (r *MachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M } c, err := ctrl.NewControllerManagedBy(mgr). - For(&expv1.MachinePool{}). + Add(builder.For(mgr, + &expv1.MachinePool{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &expv1.MachinePool{}))). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
- Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToMachinePools), + handler.EnqueueRequestsFromObjectMap(clusterToMachinePools), // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), - ), - ), - ). + predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )). Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") @@ -335,18 +331,13 @@ func (r *MachinePoolReconciler) watchClusterNodes(ctx context.Context, cluster * return r.Tracker.Watch(ctx, remote.WatchInput{ Name: "machinepool-watchNodes", Cluster: util.ObjectKey(cluster), - Watcher: r.controller, + Watcher: &controller.ControllerAdapter{Controller: r.controller}, Kind: &corev1.Node{}, - EventHandler: handler.EnqueueRequestsFromMapFunc(r.nodeToMachinePool), + EventHandler: handler.EnqueueRequestsFromObjectMapFunc(r.nodeToMachinePool), }) } -func (r *MachinePoolReconciler) nodeToMachinePool(ctx context.Context, o client.Object) []reconcile.Request { - node, ok := o.(*corev1.Node) - if !ok { - panic(fmt.Sprintf("Expected a Node but got a %T", o)) - } - +func (r *MachinePoolReconciler) nodeToMachinePool(ctx context.Context, node *corev1.Node) []reconcile.Request { var filters []client.ListOption // Match by clusterName when the node has the annotation. if clusterName, ok := node.GetAnnotations()[clusterv1.ClusterNameAnnotation]; ok { diff --git a/exp/internal/webhooks/machinepool.go b/exp/internal/webhooks/machinepool.go index a69850699260..5f52e091a1ee 100644 --- a/exp/internal/webhooks/machinepool.go +++ b/exp/internal/webhooks/machinepool.go @@ -55,7 +55,7 @@ func (webhook *MachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error { // MachinePool implements a validation and defaulting webhook for MachinePool. type MachinePool struct { - decoder *admission.Decoder + decoder admission.Decoder } var _ webhook.CustomValidator = &MachinePool{} diff --git a/exp/runtime/internal/controllers/extensionconfig_controller.go b/exp/runtime/internal/controllers/extensionconfig_controller.go index ffbe2ab3d793..f29ffe6e7dc2 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -61,13 +62,15 @@ type Reconciler struct { func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). - For(&runtimev1.ExtensionConfig{}). + Add(builder.For(mgr, + &runtimev1.ExtensionConfig{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &runtimev1.ExtensionConfig{}), + )). WatchesMetadata( &corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(r.secretToExtensionConfig), ). WithOptions(options). 
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") diff --git a/exp/util/util.go b/exp/util/util.go index cef656984454..0f82a919163c 100644 --- a/exp/util/util.go +++ b/exp/util/util.go @@ -89,15 +89,10 @@ func GetMachinePoolByLabels(ctx context.Context, c client.Client, namespace stri // MachinePoolToInfrastructureMapFunc returns a handler.MapFunc that watches for // MachinePool events and returns reconciliation requests for an infrastructure provider object. -func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc { +func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.ObjectMapFunc[*expv1.MachinePool] { log = log.WithValues("machine-pool-to-infra-map-func", gvk.String()) - return func(_ context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*expv1.MachinePool) - if !ok { - log.V(4).Info("Not a machine pool", "Object", klog.KObj(o)) - return nil - } - log := log.WithValues("MachinePool", klog.KObj(o)) + return func(_ context.Context, m *expv1.MachinePool) []reconcile.Request { + log := log.WithValues("MachinePool", klog.KObj(m)) gk := gvk.GroupKind() ref := m.Spec.Template.Spec.InfrastructureRef diff --git a/go.mod b/go.mod index 7ac253820a83..d433675bd120 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,8 @@ module sigs.k8s.io/cluster-api go 1.22.0 +replace sigs.k8s.io/controller-runtime => github.com/Danil-Grigorev/controller-runtime v0.6.1-0.20240417125124-8984b3049571 + require ( github.com/MakeNowJust/heredoc v1.0.0 github.com/Masterminds/sprig/v3 v3.2.3 @@ -35,15 +37,15 @@ require ( golang.org/x/text v0.14.0 gomodules.xyz/jsonpatch/v2 v2.4.0 google.golang.org/grpc v1.59.0 - k8s.io/api v0.29.3 - k8s.io/apiextensions-apiserver v0.29.3 - k8s.io/apimachinery v0.29.3 - k8s.io/apiserver v0.29.3 - k8s.io/client-go v0.29.3 + k8s.io/api v0.30.0-rc.2 + k8s.io/apiextensions-apiserver v0.30.0-rc.2 + k8s.io/apimachinery v0.30.0-rc.2 + k8s.io/apiserver v0.30.0-rc.2 + k8s.io/client-go v0.30.0-rc.2 k8s.io/cluster-bootstrap v0.29.3 - k8s.io/component-base v0.29.3 - k8s.io/klog/v2 v2.110.1 - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/component-base v0.30.0-rc.2 + k8s.io/klog/v2 v2.120.1 + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 k8s.io/kubectl v0.29.3 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/controller-runtime v0.17.3 @@ -88,7 +90,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect @@ -162,7 +164,7 @@ require ( golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.18.0 // indirect google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect @@ -174,7 +176,7 @@ require ( k8s.io/cli-runtime v0.29.3 // 
indirect k8s.io/component-helpers v0.29.3 // indirect k8s.io/metrics v0.29.3 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/go.sum b/go.sum index a365b554ab25..ff206e0a630b 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOEl github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Danil-Grigorev/controller-runtime v0.6.1-0.20240417125124-8984b3049571 h1:a1Oaf+Zk1mbhUP0wVULBOLZ+b4MXLW6g/2kadPQg5yw= +github.com/Danil-Grigorev/controller-runtime v0.6.1-0.20240417125124-8984b3049571/go.mod h1:TLM3OvUJgcqHVBLVRlNylmfbOlOukMLFHtc6jo3EtIQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -152,7 +154,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -212,8 +213,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -730,8 +731,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -822,28 +823,28 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= -k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= -k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/api v0.30.0-rc.2 h1:wnrY4jFP4Kx7h/Ppg86D0dyctlKfiMSXHme004ptkCU= +k8s.io/api v0.30.0-rc.2/go.mod h1:AsZ3vl/SZOLpqzfIKhleVYl5R5ruyzhB3G08xpDEjPQ= +k8s.io/apiextensions-apiserver v0.30.0-rc.2 h1:nnQg+c72aanAIrrPSyds0jtazCjOQDHo2vpazxem/TI= +k8s.io/apiextensions-apiserver v0.30.0-rc.2/go.mod h1:Vfet39CooU8WJYMintiVVNCJhHHtiJ/+ZX3CgA7O+so= +k8s.io/apimachinery v0.30.0-rc.2 h1:Q1JPqws5zCGjRwKtLW8ZKOY8lvl6aJejqIixJlHoAhc= +k8s.io/apimachinery v0.30.0-rc.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.0-rc.2 h1:FGIjvgG6HrOjjeVQKSI2qItT6dXbmYKTD1KbBW8TsIo= +k8s.io/apiserver v0.30.0-rc.2/go.mod h1:Qs+prNQNN52O3tGv5Krq9r1Cm2rqz2+r+LCkM50dJNw= k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.30.0-rc.2 h1:AqXSYq6s2BIr4WqK2dXGebxLPIsN48cMYjP71aXKspM= +k8s.io/client-go v0.30.0-rc.2/go.mod h1:vCtim9VeBumah2j1nZ/95O0V7F4Ad8N0wwCkSkgOE+Y= k8s.io/cluster-bootstrap v0.29.3 h1:DIMDZSN8gbFMy9CS2mAS2Iqq/fIUG783WN/1lqi5TF8= k8s.io/cluster-bootstrap v0.29.3/go.mod h1:aPAg1VtXx3uRrx5qU2jTzR7p1rf18zLXWS+pGhiqPto= -k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= -k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-base v0.30.0-rc.2 h1:0Qa6faUg01rBp9VxU76B8PmK58rBcAGB+7r4ckpLtgI= +k8s.io/component-base v0.30.0-rc.2/go.mod h1:rdQm+7+FBi+t74zJKiKBYVgQJEiNRMqvESRh8/f5z5k= k8s.io/component-helpers v0.29.3 h1:1dqZswuZgT2ZMixYeORyCUOAApXxgsvjVSgfoUT+P4o= k8s.io/component-helpers v0.29.3/go.mod 
h1:yiDqbRQrnQY+sPju/bL7EkwDJb6LVOots53uZNMZBos= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= k8s.io/metrics v0.29.3 h1:nN+eavbMQ7Kuif2tIdTr2/F2ec2E/SIAWSruTZ+Ye6U= @@ -853,10 +854,8 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= -sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 7654b788aa8e..f756e0a8b66e 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -18,7 +18,6 @@ package cluster import ( "context" - "fmt" "path" "strings" "time" @@ -31,6 +30,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -78,13 +78,13 @@ type Reconciler struct { func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { c, err := ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.Cluster{}). - Watches( + Add(builder.For(mgr, &clusterv1.Cluster{}, predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}))). + Add(builder.Watches(mgr, &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(r.controlPlaneMachineToCluster), - ). 
+ handler.EnqueueRequestsFromObjectMap(r.controlPlaneMachineToCluster), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Build(r) if err != nil { @@ -511,11 +511,7 @@ func (r *Reconciler) reconcileControlPlaneInitialized(ctx context.Context, clust // controlPlaneMachineToCluster is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for Cluster to update its status.controlPlaneInitialized field. -func (r *Reconciler) controlPlaneMachineToCluster(ctx context.Context, o client.Object) []ctrl.Request { - m, ok := o.(*clusterv1.Machine) - if !ok { - panic(fmt.Sprintf("Expected a Machine but got a %T", o)) - } +func (r *Reconciler) controlPlaneMachineToCluster(ctx context.Context, m *clusterv1.Machine) []ctrl.Request { if !util.IsControlPlaneMachine(m) { return nil } diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index 7e505b7d13b8..308ef231cae1 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -493,7 +493,7 @@ func TestClusterReconcilerNodeRef(t *testing.T) { tests := []struct { name string - o client.Object + o *clusterv1.Machine want []ctrl.Request }{ { diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index 9e5d3821cf59..a62f72ba04d4 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -32,6 +32,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -72,15 +73,12 @@ type Reconciler struct { } func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - err := ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.ClusterClass{}). + err := ctrl.NewControllerManagedBy(mgr).Add(builder.For(mgr, &clusterv1.ClusterClass{}, predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.ClusterClass{}))). Named("clusterclass"). WithOptions(options). - Watches( - &runtimev1.ExtensionConfig{}, - handler.EnqueueRequestsFromMapFunc(r.extensionConfigToClusterClass), - ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + Add(builder.Watches(mgr, &runtimev1.ExtensionConfig{}, + handler.EnqueueRequestsFromObjectMap(r.extensionConfigToClusterClass), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &runtimev1.ExtensionConfig{}))). Complete(r) if err != nil { @@ -393,13 +391,9 @@ func uniqueObjectRefKey(ref *corev1.ObjectReference) string { // extensionConfigToClusterClass maps an ExtensionConfigs to the corresponding ClusterClass to reconcile them on updates // of the ExtensionConfig. 
-func (r *Reconciler) extensionConfigToClusterClass(ctx context.Context, o client.Object) []reconcile.Request { +func (r *Reconciler) extensionConfigToClusterClass(ctx context.Context, ext *runtimev1.ExtensionConfig) []reconcile.Request { res := []ctrl.Request{} log := ctrl.LoggerFrom(ctx) - ext, ok := o.(*runtimev1.ExtensionConfig) - if !ok { - panic(fmt.Sprintf("Expected an ExtensionConfig but got a %T", o)) - } clusterClasses := clusterv1.ClusterClassList{} selector, err := metav1.LabelSelectorAsSelector(ext.Spec.NamespaceSelector) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index fb2668c5632a..032695071354 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -114,30 +115,31 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt } c, err := ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.Machine{}). + Add(builder.For(mgr, &clusterv1.Machine{}, predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}))). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToMachines), - builder.WithPredicates( - // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? - predicates.All(ctrl.LoggerFrom(ctx), - predicates.Any(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - predicates.ClusterControlPlaneInitialized(ctrl.LoggerFrom(ctx)), - ), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), + handler.EnqueueRequestsFromObjectMap(clusterToMachines), + // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? + predicate.Any( + predicate.Any( + predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), + predicates.ClusterControlPlaneInitialized(ctrl.LoggerFrom(ctx)), ), - )). - Watches( + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + ), + )). + Add(builder.Watches(mgr, &clusterv1.MachineSet{}, - handler.EnqueueRequestsFromMapFunc(msToMachines), - ). - Watches( + handler.EnqueueRequestsFromObjectMap(msToMachines), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineSet{}), + )). + Add(builder.Watches(mgr, &clusterv1.MachineDeployment{}, - handler.EnqueueRequestsFromMapFunc(mdToMachines), - ). + handler.EnqueueRequestsFromObjectMap(mdToMachines), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineDeployment{}), + )). 
Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") @@ -857,18 +859,13 @@ func (r *Reconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.C return r.Tracker.Watch(ctx, remote.WatchInput{ Name: "machine-watchNodes", Cluster: util.ObjectKey(cluster), - Watcher: r.controller, + Watcher: &controller.ControllerAdapter{Controller: r.controller}, Kind: &corev1.Node{}, - EventHandler: handler.EnqueueRequestsFromMapFunc(r.nodeToMachine), + EventHandler: handler.EnqueueRequestsFromObjectMapFunc(r.nodeToMachine), }) } -func (r *Reconciler) nodeToMachine(ctx context.Context, o client.Object) []reconcile.Request { - node, ok := o.(*corev1.Node) - if !ok { - panic(fmt.Sprintf("Expected a Node but got a %T", o)) - } - +func (r *Reconciler) nodeToMachine(ctx context.Context, node *corev1.Node) []reconcile.Request { var filters []client.ListOption // Match by clusterName when the node has the annotation. if clusterName, ok := node.GetAnnotations()[clusterv1.ClusterNameAnnotation]; ok { diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 48ebcabf3b02..b2470356e49f 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -143,7 +144,7 @@ func TestGetNode(t *testing.T) { g.Expect(tracker.Watch(ctx, remote.WatchInput{ Name: "TestGetNode", Cluster: util.ObjectKey(testCluster), - Watcher: w, + Watcher: &controller.ControllerAdapter{Controller: w}, Kind: &corev1.Node{}, EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request { return nil diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 03edc482858e..1c95f5c9ef11 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -18,7 +18,6 @@ package machinedeployment import ( "context" - "fmt" "strings" "github.com/pkg/errors" @@ -81,25 +80,24 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt } err = ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.MachineDeployment{}). - Owns(&clusterv1.MachineSet{}). + Add(builder.For(mgr, + &clusterv1.MachineDeployment{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineDeployment{}))). + Add(builder.Owns(mgr, &clusterv1.MachineDeployment{}, &clusterv1.MachineSet{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineSet{}))). // Watches enqueues MachineDeployment for corresponding MachineSet resources, if no managed controller reference (owner) exists. - Watches( + Add(builder.Watches(mgr, &clusterv1.MachineSet{}, - handler.EnqueueRequestsFromMapFunc(r.MachineSetToDeployments), + handler.EnqueueRequestsFromObjectMap(r.MachineSetToDeployments), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineSet{})), ). WithOptions(options). 
- WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToMachineDeployments), - builder.WithPredicates( - // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - ), - ), - ).Complete(r) + handler.EnqueueRequestsFromObjectMap(clusterToMachineDeployments), + // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )).Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -380,14 +378,9 @@ func (r *Reconciler) getMachineDeploymentsForMachineSet(ctx context.Context, ms // MachineSetToDeployments is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for MachineDeployments that might adopt an orphaned MachineSet. -func (r *Reconciler) MachineSetToDeployments(ctx context.Context, o client.Object) []ctrl.Request { +func (r *Reconciler) MachineSetToDeployments(ctx context.Context, ms *clusterv1.MachineSet) []ctrl.Request { result := []ctrl.Request{} - ms, ok := o.(*clusterv1.MachineSet) - if !ok { - panic(fmt.Sprintf("Expected a MachineSet but got a %T", o)) - } - // Check if the controller reference is already set and // return an empty result when one is found. for _, ref := range ms.ObjectMeta.GetOwnerReferences() { diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index b54f128cb41e..3c16c0a39afa 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -710,7 +710,7 @@ func TestMachineSetToDeployments(t *testing.T) { testsCases := []struct { machineSet clusterv1.MachineSet - mapObject client.Object + mapObject *clusterv1.MachineSet expected []reconcile.Request }{ { diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index 08879720d839..95767a506fd2 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -86,24 +86,22 @@ type Reconciler struct { func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { c, err := ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.MachineHealthCheck{}). - Watches( + Add(builder.For(mgr, + &clusterv1.MachineHealthCheck{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineHealthCheck{}))). + Add(builder.Watches(mgr, &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(r.machineToMachineHealthCheck), - ). + handler.EnqueueRequestsFromObjectMap(r.machineToMachineHealthCheck), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
- Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(r.clusterToMachineHealthCheck), - builder.WithPredicates( - // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), - ), - ), - ).Build(r) + handler.EnqueueRequestsFromObjectMap(r.clusterToMachineHealthCheck), + // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? + predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), + predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )).Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -455,12 +453,7 @@ func (r *Reconciler) patchUnhealthyTargets(ctx context.Context, logger logr.Logg // clusterToMachineHealthCheck maps events from Cluster objects to // MachineHealthCheck objects that belong to the Cluster. -func (r *Reconciler) clusterToMachineHealthCheck(ctx context.Context, o client.Object) []reconcile.Request { - c, ok := o.(*clusterv1.Cluster) - if !ok { - panic(fmt.Sprintf("Expected a Cluster, got %T", o)) - } - +func (r *Reconciler) clusterToMachineHealthCheck(ctx context.Context, c *clusterv1.Cluster) []reconcile.Request { mhcList := &clusterv1.MachineHealthCheckList{} if err := r.Client.List( ctx, @@ -482,12 +475,7 @@ func (r *Reconciler) clusterToMachineHealthCheck(ctx context.Context, o client.O // machineToMachineHealthCheck maps events from Machine objects to // MachineHealthCheck objects that monitor the given machine. -func (r *Reconciler) machineToMachineHealthCheck(ctx context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*clusterv1.Machine) - if !ok { - panic(fmt.Sprintf("Expected a Machine, got %T", o)) - } - +func (r *Reconciler) machineToMachineHealthCheck(ctx context.Context, m *clusterv1.Machine) []reconcile.Request { mhcList := &clusterv1.MachineHealthCheckList{} if err := r.Client.List( ctx, @@ -509,12 +497,7 @@ func (r *Reconciler) machineToMachineHealthCheck(ctx context.Context, o client.O return requests } -func (r *Reconciler) nodeToMachineHealthCheck(ctx context.Context, o client.Object) []reconcile.Request { - node, ok := o.(*corev1.Node) - if !ok { - panic(fmt.Sprintf("Expected a corev1.Node, got %T", o)) - } - +func (r *Reconciler) nodeToMachineHealthCheck(ctx context.Context, node *corev1.Node) []reconcile.Request { machine, err := getMachineFromNode(ctx, r.Client, node.Name) if machine == nil || err != nil { return nil @@ -532,9 +515,9 @@ func (r *Reconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.C return r.Tracker.Watch(ctx, remote.WatchInput{ Name: "machinehealthcheck-watchClusterNodes", Cluster: util.ObjectKey(cluster), - Watcher: r.controller, + Watcher: &controller.ControllerAdapter{Controller: r.controller}, Kind: &corev1.Node{}, - EventHandler: handler.EnqueueRequestsFromMapFunc(r.nodeToMachineHealthCheck), + EventHandler: handler.EnqueueRequestsFromObjectMapFunc(r.nodeToMachineHealthCheck), }) } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 55d13ab5f931..576576fee6a6 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ 
b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -1920,7 +1920,7 @@ func TestClusterToMachineHealthCheck(t *testing.T) { testCases := []struct { name string toCreate []clusterv1.MachineHealthCheck - object client.Object + object *clusterv1.Cluster expected []reconcile.Request }{ { @@ -1995,7 +1995,7 @@ func TestMachineToMachineHealthCheck(t *testing.T) { testCases := []struct { name string toCreate []clusterv1.MachineHealthCheck - object client.Object + object *clusterv1.Machine expected []reconcile.Request }{ { @@ -2082,7 +2082,7 @@ func TestNodeToMachineHealthCheck(t *testing.T) { name string mhcToCreate []clusterv1.MachineHealthCheck mToCreate []clusterv1.Machine - object client.Object + object *corev1.Node expected []reconcile.Request }{ { diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 05c9d325a8ad..ca2dd9c8bff4 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -98,26 +98,21 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt } err = ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.MachineSet{}). - Owns(&clusterv1.Machine{}). + Add(builder.For(mgr, &clusterv1.MachineSet{}, predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineSet{}))). + Add(builder.Owns(mgr, &clusterv1.MachineSet{}, &clusterv1.Machine{}, predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}))). // Watches enqueues MachineSet for corresponding Machine resources, if no managed controller reference (owner) exists. - Watches( + Add(builder.Watches(mgr, &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(r.MachineToMachineSets), - ). + handler.EnqueueRequestsFromObjectMap(r.MachineToMachineSets), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToMachineSets), - builder.WithPredicates( - // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), - ), - ), - ).Complete(r) + handler.EnqueueRequestsFromObjectMap(clusterToMachineSets), + // TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources? + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + )).Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -772,14 +767,8 @@ func (r *Reconciler) waitForMachineDeletion(ctx context.Context, machineList []* // MachineToMachineSets is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for MachineSets that might adopt an orphaned Machine. 
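// A sketch of the remote-cluster watch shape used in the Machine, MachinePool and
// MachineHealthCheck controllers above: the tracker's Watcher field now takes the
// controller wrapped in controller.ControllerAdapter, and Node events are mapped
// with a typed handler. ExampleReconciler, its Tracker/controller fields and
// nodeToExample are hypothetical; remote.WatchInput and the adapter are the types
// shown in the hunks above, and imports match those files.
func (r *ExampleReconciler) watchClusterNodes(ctx context.Context, cluster *clusterv1.Cluster) error {
	return r.Tracker.Watch(ctx, remote.WatchInput{
		Name:         "example-watchNodes",
		Cluster:      util.ObjectKey(cluster),
		Watcher:      &controller.ControllerAdapter{Controller: r.controller},
		Kind:         &corev1.Node{},
		EventHandler: handler.EnqueueRequestsFromObjectMapFunc(nodeToExample),
	})
}

// nodeToExample receives the Node already typed; the lookup of the owning
// resource is elided in this sketch.
func nodeToExample(_ context.Context, _ *corev1.Node) []reconcile.Request {
	return nil
}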
-func (r *Reconciler) MachineToMachineSets(ctx context.Context, o client.Object) []ctrl.Request { +func (r *Reconciler) MachineToMachineSets(ctx context.Context, m *clusterv1.Machine) []ctrl.Request { result := []ctrl.Request{} - - m, ok := o.(*clusterv1.Machine) - if !ok { - panic(fmt.Sprintf("Expected a Machine but got a %T", o)) - } - log := ctrl.LoggerFrom(ctx, "Machine", klog.KObj(m)) // Check if the controller reference is already set and diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index ff323b18d52c..b853b4260869 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -632,7 +632,7 @@ func TestMachineSetToMachines(t *testing.T) { } testsCases := []struct { name string - mapObject client.Object + mapObject *clusterv1.Machine expected []reconcile.Request }{ { diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index 9e9af3ce1d54..a006239266e4 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -18,7 +18,6 @@ package cluster import ( "context" - "fmt" "time" "github.com/pkg/errors" @@ -92,29 +91,32 @@ type Reconciler struct { func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { c, err := ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.Cluster{}, builder.WithPredicates( + Add(builder.For(mgr, &clusterv1.Cluster{}, // Only reconcile Cluster with topology. + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), predicates.ClusterHasTopology(ctrl.LoggerFrom(ctx)), )). Named("topology/cluster"). - Watches( + Add(builder.Watches(mgr, &clusterv1.ClusterClass{}, - handler.EnqueueRequestsFromMapFunc(r.clusterClassToCluster), - ). - Watches( + handler.EnqueueRequestsFromObjectMap(r.clusterClassToCluster), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.ClusterClass{}), + )). + Add(builder.Watches(mgr, &clusterv1.MachineDeployment{}, - handler.EnqueueRequestsFromMapFunc(r.machineDeploymentToCluster), + handler.EnqueueRequestsFromObjectMap(r.machineDeploymentToCluster), // Only trigger Cluster reconciliation if the MachineDeployment is topology owned. - builder.WithPredicates(predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx))), - ). - Watches( + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineDeployment{}), + predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx), &clusterv1.MachineDeployment{}), + )). + Add(builder.Watches(mgr, &expv1.MachinePool{}, - handler.EnqueueRequestsFromMapFunc(r.machinePoolToCluster), + handler.EnqueueRequestsFromObjectMap(r.machinePoolToCluster), // Only trigger Cluster reconciliation if the MachinePool is topology owned. - builder.WithPredicates(predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx))), - ). + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &expv1.MachinePool{}), + predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx), &expv1.MachinePool{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
Build(r) if err != nil { @@ -297,7 +299,7 @@ func (r *Reconciler) setupDynamicWatches(ctx context.Context, s *scope.Scope) er if err := r.externalTracker.Watch(ctrl.LoggerFrom(ctx), s.Current.InfrastructureCluster, handler.EnqueueRequestForOwner(r.Client.Scheme(), r.Client.RESTMapper(), &clusterv1.Cluster{}), // Only trigger Cluster reconciliation if the InfrastructureCluster is topology owned. - predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx))); err != nil { + predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx), &clusterv1.Cluster{})); err != nil { return errors.Wrap(err, "error watching Infrastructure CR") } } @@ -305,7 +307,7 @@ func (r *Reconciler) setupDynamicWatches(ctx context.Context, s *scope.Scope) er if err := r.externalTracker.Watch(ctrl.LoggerFrom(ctx), s.Current.ControlPlane.Object, handler.EnqueueRequestForOwner(r.Client.Scheme(), r.Client.RESTMapper(), &clusterv1.Cluster{}), // Only trigger Cluster reconciliation if the ControlPlane is topology owned. - predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx))); err != nil { + predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx), &clusterv1.Cluster{})); err != nil { return errors.Wrap(err, "error watching ControlPlane CR") } } @@ -335,12 +337,7 @@ func (r *Reconciler) callBeforeClusterCreateHook(ctx context.Context, s *scope.S // clusterClassToCluster is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for Cluster to update when its own ClusterClass gets updated. -func (r *Reconciler) clusterClassToCluster(ctx context.Context, o client.Object) []ctrl.Request { - clusterClass, ok := o.(*clusterv1.ClusterClass) - if !ok { - panic(fmt.Sprintf("Expected a ClusterClass but got a %T", o)) - } - +func (r *Reconciler) clusterClassToCluster(ctx context.Context, clusterClass *clusterv1.ClusterClass) []ctrl.Request { clusterList := &clusterv1.ClusterList{} if err := r.Client.List( ctx, @@ -362,11 +359,7 @@ func (r *Reconciler) clusterClassToCluster(ctx context.Context, o client.Object) // machineDeploymentToCluster is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for Cluster to update when one of its own MachineDeployments gets updated. -func (r *Reconciler) machineDeploymentToCluster(_ context.Context, o client.Object) []ctrl.Request { - md, ok := o.(*clusterv1.MachineDeployment) - if !ok { - panic(fmt.Sprintf("Expected a MachineDeployment but got a %T", o)) - } +func (r *Reconciler) machineDeploymentToCluster(_ context.Context, md *clusterv1.MachineDeployment) []ctrl.Request { if md.Spec.ClusterName == "" { return nil } @@ -381,11 +374,7 @@ func (r *Reconciler) machineDeploymentToCluster(_ context.Context, o client.Obje // machinePoolToCluster is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for Cluster to update when one of its own MachinePools gets updated. 
-func (r *Reconciler) machinePoolToCluster(_ context.Context, o client.Object) []ctrl.Request { - mp, ok := o.(*expv1.MachinePool) - if !ok { - panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) - } +func (r *Reconciler) machinePoolToCluster(_ context.Context, mp *expv1.MachinePool) []ctrl.Request { if mp.Spec.ClusterName == "" { return nil } diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go index b12a66826811..2f8a9372742e 100644 --- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go @@ -63,26 +63,18 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt } err = ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.MachineDeployment{}, - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx)), - predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))), - ), - ). + Add(builder.For(mgr, &clusterv1.MachineDeployment{}, + predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx), &clusterv1.MachineDeployment{}), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineDeployment{}), + )). Named("topology/machinedeployment"). WithOptions(options). - WithEventFilter(predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToMachineDeployments), - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - predicates.ClusterHasTopology(ctrl.LoggerFrom(ctx)), - ), - ), - ). + handler.EnqueueRequestsFromObjectMap(clusterToMachineDeployments), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterHasTopology(ctrl.LoggerFrom(ctx)), + )). Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go index f5f30bac1fd8..ff5a6630760b 100644 --- a/internal/controllers/topology/machineset/machineset_controller.go +++ b/internal/controllers/topology/machineset/machineset_controller.go @@ -65,26 +65,19 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt } err = ctrl.NewControllerManagedBy(mgr). - For(&clusterv1.MachineSet{}, - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx)), - predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))), - ), - ). + Add(builder.For(mgr, &clusterv1.MachineSet{}, + predicates.ResourceIsTopologyOwned(ctrl.LoggerFrom(ctx), &clusterv1.MachineSet{}), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.MachineSet{}), + )). Named("topology/machineset"). WithOptions(options). - WithEventFilter(predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
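The mapper conversions in this area (clusterClassToCluster, machineDeploymentToCluster, machinePoolToCluster) all drop the same boilerplate: the handler now hands over the concrete type, so the o.(*T) assertion and the panic on mismatch are gone. A self-contained sketch of one such typed mapper, assuming the fork's handler.ObjectMapFunc signature; the request construction is paraphrased from the surrounding controller code rather than quoted:

	package cluster // illustrative placement next to the topology cluster controller

	import (
		"context"

		"k8s.io/apimachinery/pkg/types"
		ctrl "sigs.k8s.io/controller-runtime"

		clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	)

	// machineDeploymentToCluster enqueues the owning Cluster for a MachineDeployment;
	// the parameter is already *clusterv1.MachineDeployment, no type assertion needed.
	func machineDeploymentToCluster(_ context.Context, md *clusterv1.MachineDeployment) []ctrl.Request {
		if md.Spec.ClusterName == "" {
			return nil
		}
		return []ctrl.Request{{
			NamespacedName: types.NamespacedName{
				Namespace: md.Namespace,
				Name:      md.Spec.ClusterName,
			},
		}}
	}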
- Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToMachineSets), - builder.WithPredicates( - predicates.All(ctrl.LoggerFrom(ctx), - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - predicates.ClusterHasTopology(ctrl.LoggerFrom(ctx)), - ), - ), - ). + handler.EnqueueRequestsFromObjectMap(clusterToMachineSets), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), + predicates.ClusterHasTopology(ctrl.LoggerFrom(ctx)), + )). Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") diff --git a/internal/webhooks/machinedeployment.go b/internal/webhooks/machinedeployment.go index 80b1203f5bae..843f6d0f6d12 100644 --- a/internal/webhooks/machinedeployment.go +++ b/internal/webhooks/machinedeployment.go @@ -58,7 +58,7 @@ func (webhook *MachineDeployment) SetupWebhookWithManager(mgr ctrl.Manager) erro // MachineDeployment implements a validation and defaulting webhook for MachineDeployment. type MachineDeployment struct { - decoder *admission.Decoder + decoder admission.Decoder } var _ webhook.CustomDefaulter = &MachineDeployment{} diff --git a/internal/webhooks/machineset.go b/internal/webhooks/machineset.go index 0b910cdda985..6fe5d90ccde5 100644 --- a/internal/webhooks/machineset.go +++ b/internal/webhooks/machineset.go @@ -59,7 +59,7 @@ func (webhook *MachineSet) SetupWebhookWithManager(mgr ctrl.Manager) error { // MachineSet implements a validation and defaulting webhook for MachineSet. type MachineSet struct { - decoder *admission.Decoder + decoder admission.Decoder } var _ webhook.CustomDefaulter = &MachineSet{} diff --git a/test/go.mod b/test/go.mod index 723318ae0f8b..fd6665cd0aa1 100644 --- a/test/go.mod +++ b/test/go.mod @@ -4,6 +4,8 @@ go 1.22.0 replace sigs.k8s.io/cluster-api => ../ +replace sigs.k8s.io/controller-runtime => github.com/Danil-Grigorev/controller-runtime v0.6.1-0.20240417125124-8984b3049571 + require ( github.com/blang/semver/v4 v4.0.0 github.com/docker/docker v26.0.1+incompatible @@ -23,13 +25,13 @@ require ( go.etcd.io/etcd/client/v3 v3.5.13 golang.org/x/net v0.24.0 google.golang.org/grpc v1.60.1 - k8s.io/api v0.29.3 - k8s.io/apiextensions-apiserver v0.29.3 - k8s.io/apimachinery v0.29.3 - k8s.io/apiserver v0.29.3 - k8s.io/client-go v0.29.3 - k8s.io/component-base v0.29.3 - k8s.io/klog/v2 v2.110.1 + k8s.io/api v0.30.0-rc.2 + k8s.io/apiextensions-apiserver v0.30.0-rc.2 + k8s.io/apimachinery v0.30.0-rc.2 + k8s.io/apiserver v0.30.0-rc.2 + k8s.io/client-go v0.30.0-rc.2 + k8s.io/component-base v0.30.0-rc.2 + k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/controller-runtime v0.17.3 @@ -80,7 +82,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -160,7 +162,7 @@ require ( golang.org/x/term v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.18.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // 
indirect google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect @@ -173,9 +175,9 @@ require ( gotest.tools/v3 v3.4.0 // indirect k8s.io/cli-runtime v0.29.3 // indirect k8s.io/cluster-bootstrap v0.29.3 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.29.3 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/test/go.sum b/test/go.sum index b0222dbcf28b..547ad02476cd 100644 --- a/test/go.sum +++ b/test/go.sum @@ -9,6 +9,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/Danil-Grigorev/controller-runtime v0.6.1-0.20240417125124-8984b3049571 h1:a1Oaf+Zk1mbhUP0wVULBOLZ+b4MXLW6g/2kadPQg5yw= +github.com/Danil-Grigorev/controller-runtime v0.6.1-0.20240417125124-8984b3049571/go.mod h1:TLM3OvUJgcqHVBLVRlNylmfbOlOukMLFHtc6jo3EtIQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -116,7 +118,6 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -157,8 +158,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -505,8 +506,8 @@ 
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -562,34 +563,32 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= -k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= -k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/api v0.30.0-rc.2 h1:wnrY4jFP4Kx7h/Ppg86D0dyctlKfiMSXHme004ptkCU= +k8s.io/api v0.30.0-rc.2/go.mod h1:AsZ3vl/SZOLpqzfIKhleVYl5R5ruyzhB3G08xpDEjPQ= +k8s.io/apiextensions-apiserver v0.30.0-rc.2 h1:nnQg+c72aanAIrrPSyds0jtazCjOQDHo2vpazxem/TI= +k8s.io/apiextensions-apiserver v0.30.0-rc.2/go.mod h1:Vfet39CooU8WJYMintiVVNCJhHHtiJ/+ZX3CgA7O+so= +k8s.io/apimachinery v0.30.0-rc.2 h1:Q1JPqws5zCGjRwKtLW8ZKOY8lvl6aJejqIixJlHoAhc= +k8s.io/apimachinery v0.30.0-rc.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.0-rc.2 h1:FGIjvgG6HrOjjeVQKSI2qItT6dXbmYKTD1KbBW8TsIo= +k8s.io/apiserver v0.30.0-rc.2/go.mod h1:Qs+prNQNN52O3tGv5Krq9r1Cm2rqz2+r+LCkM50dJNw= k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/client-go v0.30.0-rc.2 h1:AqXSYq6s2BIr4WqK2dXGebxLPIsN48cMYjP71aXKspM= +k8s.io/client-go v0.30.0-rc.2/go.mod h1:vCtim9VeBumah2j1nZ/95O0V7F4Ad8N0wwCkSkgOE+Y= k8s.io/cluster-bootstrap v0.29.3 h1:DIMDZSN8gbFMy9CS2mAS2Iqq/fIUG783WN/1lqi5TF8= k8s.io/cluster-bootstrap v0.29.3/go.mod h1:aPAg1VtXx3uRrx5qU2jTzR7p1rf18zLXWS+pGhiqPto= -k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= -k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= -k8s.io/klog/v2 v2.110.1 
h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/component-base v0.30.0-rc.2 h1:0Qa6faUg01rBp9VxU76B8PmK58rBcAGB+7r4ckpLtgI= +k8s.io/component-base v0.30.0-rc.2/go.mod h1:rdQm+7+FBi+t74zJKiKBYVgQJEiNRMqvESRh8/f5z5k= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk= -sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index a39046f503a3..2f351563f043 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -172,25 +172,28 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr } c, err := ctrl.NewControllerManagedBy(mgr). - For(&infraexpv1.DockerMachinePool{}). + Add(builder.For(mgr, + &infraexpv1.DockerMachinePool{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infraexpv1.DockerMachinePool{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &expv1.MachinePool{}, - handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc( + handler.EnqueueRequestsFromObjectMap(utilexp.MachinePoolToInfrastructureMapFunc( infraexpv1.GroupVersion.WithKind("DockerMachinePool"), ctrl.LoggerFrom(ctx))), - ). - Watches( + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &expv1.MachinePool{}), + )). 
+ Add(builder.Watches(mgr, &infrav1.DockerMachine{}, - handler.EnqueueRequestsFromMapFunc(dockerMachineToDockerMachinePool), - ). - Watches( + handler.EnqueueRequestsFromObjectMap(dockerMachineToDockerMachinePool), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.DockerMachine{}), + )). + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToDockerMachinePools), - builder.WithPredicates( - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - ), - ).Build(r) + handler.EnqueueRequestsFromObjectMap(clusterToDockerMachinePools), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + )).Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -351,12 +354,7 @@ func setInfrastructureMachineKind(dockerMachinePool *infraexpv1.DockerMachinePoo } // dockerMachineToDockerMachinePool creates a mapping handler to transform DockerMachine to DockerMachinePools. -func dockerMachineToDockerMachinePool(_ context.Context, o client.Object) []ctrl.Request { - dockerMachine, ok := o.(*infrav1.DockerMachine) - if !ok { - panic(fmt.Sprintf("Expected a DockerMachine but got a %T", o)) - } - +func dockerMachineToDockerMachinePool(_ context.Context, dockerMachine *infrav1.DockerMachine) []ctrl.Request { for _, ownerRef := range dockerMachine.GetOwnerReferences() { gv, err := schema.ParseGroupVersion(ownerRef.APIVersion) if err != nil { diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index b5ace259e65c..21b5251c2861 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -199,16 +199,17 @@ func (r *DockerClusterReconciler) reconcileDelete(ctx context.Context, dockerClu // SetupWithManager will add watches for this controller. func (r *DockerClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). - For(&infrav1.DockerCluster{}). + Add(builder.For(mgr, + &infrav1.DockerCluster{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.DockerCluster{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). 
- Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("DockerCluster"), mgr.GetClient(), &infrav1.DockerCluster{})), - builder.WithPredicates( - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - ), - ).Complete(r) + handler.EnqueueRequestsFromObjectMap(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("DockerCluster"), mgr.GetClient(), &infrav1.DockerCluster{})), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), + )).Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 8b517275acfa..18fe38ca3754 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -485,24 +485,26 @@ func (r *DockerMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl } err = ctrl.NewControllerManagedBy(mgr). - For(&infrav1.DockerMachine{}). + Add(builder.For(mgr, + &infrav1.DockerMachine{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.DockerMachine{}))). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("DockerMachine"))), - ). - Watches( + handler.EnqueueRequestsFromObjectMap(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("DockerMachine"))), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )). + Add(builder.Watches(mgr, &infrav1.DockerCluster{}, - handler.EnqueueRequestsFromMapFunc(r.DockerClusterToDockerMachines), - ). - Watches( + handler.EnqueueRequestsFromObjectMap(r.DockerClusterToDockerMachines), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.DockerCluster{}), + )). + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToDockerMachines), - builder.WithPredicates( - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - ), - ).Complete(r) + handler.EnqueueRequestsFromObjectMap(clusterToDockerMachines), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + )).Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -511,13 +513,8 @@ func (r *DockerMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl // DockerClusterToDockerMachines is a handler.ToRequestsFunc to be used to enqueue // requests for reconciliation of DockerMachines.
-func (r *DockerMachineReconciler) DockerClusterToDockerMachines(ctx context.Context, o client.Object) []ctrl.Request { +func (r *DockerMachineReconciler) DockerClusterToDockerMachines(ctx context.Context, c *infrav1.DockerCluster) []ctrl.Request { result := []ctrl.Request{} - c, ok := o.(*infrav1.DockerCluster) - if !ok { - panic(fmt.Sprintf("Expected a DockerCluster but got a %T", o)) - } - cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go b/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go index 96392a543e53..307c3fb5e52e 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go @@ -209,16 +209,16 @@ func (r *InMemoryClusterReconciler) reconcileDelete(_ context.Context, cluster * // SetupWithManager will add watches for this controller. func (r *InMemoryClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). - For(&infrav1.InMemoryCluster{}). + Add(builder.For(mgr, + &infrav1.InMemoryCluster{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.InMemoryCluster{}))). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("InMemoryCluster"), mgr.GetClient(), &infrav1.InMemoryCluster{})), - builder.WithPredicates( - predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), - ), - ).Complete(r) + handler.EnqueueRequestsFromObjectMap(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("InMemoryCluster"), mgr.GetClient(), &infrav1.InMemoryCluster{})), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)), + )).Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go index 51666e0d8193..491734c9bb20 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go @@ -1144,24 +1144,27 @@ func (r *InMemoryMachineReconciler) SetupWithManager(ctx context.Context, mgr ct } err = ctrl.NewControllerManagedBy(mgr). - For(&infrav1.InMemoryMachine{}). + Add(builder.For(mgr, + &infrav1.InMemoryMachine{}, + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.InMemoryMachine{}), + )). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Watches( + Add(builder.Watches(mgr, &clusterv1.Machine{}, - handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("InMemoryMachine"))), - ). 
- Watches( + handler.EnqueueRequestsFromObjectMap(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("InMemoryMachine"))), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Machine{}), + )). + Add(builder.Watches(mgr, &infrav1.InMemoryCluster{}, - handler.EnqueueRequestsFromMapFunc(r.InMemoryClusterToInMemoryMachines), - ). - Watches( + handler.EnqueueRequestsFromObjectMap(r.InMemoryClusterToInMemoryMachines), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &infrav1.InMemoryCluster{}), + )). + Add(builder.Watches(mgr, &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(clusterToInMemoryMachines), - builder.WithPredicates( - predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), - ), - ).Complete(r) + handler.EnqueueRequestsFromObjectMap(clusterToInMemoryMachines), + predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue, &clusterv1.Cluster{}), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), + )).Complete(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } @@ -1170,13 +1173,8 @@ func (r *InMemoryMachineReconciler) SetupWithManager(ctx context.Context, mgr ct // InMemoryClusterToInMemoryMachines is a handler.ToRequestsFunc to be used to enqueue // requests for reconciliation of InMemoryMachines. -func (r *InMemoryMachineReconciler) InMemoryClusterToInMemoryMachines(ctx context.Context, o client.Object) []ctrl.Request { +func (r *InMemoryMachineReconciler) InMemoryClusterToInMemoryMachines(ctx context.Context, c *infrav1.InMemoryCluster) []ctrl.Request { result := []ctrl.Request{} - c, ok := o.(*infrav1.InMemoryCluster) - if !ok { - panic(fmt.Sprintf("Expected a InMemoryCluster but got a %T", o)) - } - cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index 92986f8c88b7..06fd4fa7329a 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -18,12 +18,8 @@ limitations under the License. package predicates import ( - "fmt" - "github.com/go-logr/logr" "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -32,16 +28,10 @@ import ( // ClusterCreateInfraReady returns a predicate that returns true for a create event when a cluster has Status.InfrastructureReady set as true // it also returns true if the resource provided is not a Cluster to allow for use with controller-runtime NewControllerManagedBy. 
-func ClusterCreateInfraReady(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { +func ClusterCreateInfraReady(logger logr.Logger) predicate.ObjectFuncs[*clusterv1.Cluster] { + return predicate.ObjectFuncs[*clusterv1.Cluster]{ + CreateFunc: func(c *clusterv1.Cluster) bool { log := logger.WithValues("predicate", "ClusterCreateInfraReady", "eventType", "create") - - c, ok := e.Object.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) - return false - } log = log.WithValues("Cluster", klog.KObj(c)) // Only need to trigger a reconcile if the Cluster.Status.InfrastructureReady is true @@ -53,24 +43,15 @@ func ClusterCreateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Cluster infrastructure is not ready, blocking further processing") return false }, - UpdateFunc: func(event.UpdateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, } } // ClusterCreateNotPaused returns a predicate that returns true for a create event when a cluster has Spec.Paused set as false // it also returns true if the resource provided is not a Cluster to allow for use with controller-runtime NewControllerManagedBy. -func ClusterCreateNotPaused(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { +func ClusterCreateNotPaused(logger logr.Logger) predicate.ObjectFuncs[*clusterv1.Cluster] { + return predicate.ObjectFuncs[*clusterv1.Cluster]{ + CreateFunc: func(c *clusterv1.Cluster) bool { log := logger.WithValues("predicate", "ClusterCreateNotPaused", "eventType", "create") - - c, ok := e.Object.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) - return false - } log = log.WithValues("Cluster", klog.KObj(c)) // Only need to trigger a reconcile if the Cluster.Spec.Paused is false @@ -82,28 +63,17 @@ func ClusterCreateNotPaused(logger logr.Logger) predicate.Funcs { log.V(4).Info("Cluster is paused, blocking further processing") return false }, - UpdateFunc: func(event.UpdateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, } } // ClusterUpdateInfraReady returns a predicate that returns true for an update event when a cluster has Status.InfrastructureReady changed from false to true // it also returns true if the resource provided is not a Cluster to allow for use with controller-runtime NewControllerManagedBy. 
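The predicate rewrites starting here trade predicate.Funcs for the fork's typed predicate.ObjectFuncs[*clusterv1.Cluster]: callbacks receive Cluster pointers directly, and the UpdateFunc/DeleteFunc/GenericFunc stubs returning false are dropped. A condensed sketch of the create-event shape, mirroring ClusterCreateInfraReady; the function name is illustrative and the behaviour of unset callbacks is whatever the fork's ObjectFuncs defines:

	package predicates // illustrative placement

	import (
		"github.com/go-logr/logr"
		"k8s.io/klog/v2"
		"sigs.k8s.io/controller-runtime/pkg/predicate"

		clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	)

	// clusterCreatedWithInfraReady only sets the create callback and checks the
	// typed Cluster directly, as the converted predicates in this file do.
	func clusterCreatedWithInfraReady(logger logr.Logger) predicate.ObjectFuncs[*clusterv1.Cluster] {
		return predicate.ObjectFuncs[*clusterv1.Cluster]{
			CreateFunc: func(c *clusterv1.Cluster) bool {
				log := logger.WithValues("Cluster", klog.KObj(c))
				if c.Status.InfrastructureReady {
					log.V(6).Info("Cluster infrastructure is ready, allowing further processing")
					return true
				}
				log.V(4).Info("Cluster infrastructure is not ready, blocking further processing")
				return false
			},
		}
	}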
-func ClusterUpdateInfraReady(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { +func ClusterUpdateInfraReady(logger logr.Logger) predicate.ObjectFuncs[*clusterv1.Cluster] { + return predicate.ObjectFuncs[*clusterv1.Cluster]{ + UpdateFunc: func(oldCluster, newCluster *clusterv1.Cluster) bool { log := logger.WithValues("predicate", "ClusterUpdateInfraReady", "eventType", "update") - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } log = log.WithValues("Cluster", klog.KObj(oldCluster)) - newCluster := e.ObjectNew.(*clusterv1.Cluster) - if !oldCluster.Status.InfrastructureReady && newCluster.Status.InfrastructureReady { log.V(6).Info("Cluster infrastructure became ready, allowing further processing") return true @@ -112,28 +82,17 @@ func ClusterUpdateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Cluster infrastructure did not become ready, blocking further processing") return false }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, } } // ClusterUpdateUnpaused returns a predicate that returns true for an update event when a cluster has Spec.Paused changed from true to false // it also returns true if the resource provided is not a Cluster to allow for use with controller-runtime NewControllerManagedBy. -func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { +func ClusterUpdateUnpaused(logger logr.Logger) predicate.ObjectFuncs[*clusterv1.Cluster] { + return predicate.ObjectFuncs[*clusterv1.Cluster]{ + UpdateFunc: func(oldCluster, newCluster *clusterv1.Cluster) bool { log := logger.WithValues("predicate", "ClusterUpdateUnpaused", "eventType", "update") - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } log = log.WithValues("Cluster", klog.KObj(oldCluster)) - newCluster := e.ObjectNew.(*clusterv1.Cluster) - if oldCluster.Spec.Paused && !newCluster.Spec.Paused { log.V(4).Info("Cluster was unpaused, allowing further processing") return true @@ -144,9 +103,6 @@ func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { log.V(6).Info("Cluster was not unpaused, blocking further processing") return false }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, } } @@ -161,11 +117,11 @@ func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { // handler.EnqueueRequestsFromMapFunc(clusterToMachines) // predicates.ClusterUnpaused(r.Log), // ) -func ClusterUnpaused(logger logr.Logger) predicate.Funcs { +func ClusterUnpaused(logger logr.Logger) predicate.ObjectPredicate[*clusterv1.Cluster] { log := logger.WithValues("predicate", "ClusterUnpaused") // Use any to ensure we process either create or update events we care about - return Any(log, ClusterCreateNotPaused(log), ClusterUpdateUnpaused(log)) + return predicate.Any(ClusterCreateNotPaused(log), ClusterUpdateUnpaused(log)) } // ClusterControlPlaneInitialized returns a Predicate that returns true on Update events @@ -177,20 +133,12 @@ func ClusterUnpaused(logger logr.Logger) predicate.Funcs { // 
handler.EnqueueRequestsFromMapFunc(clusterToMachines) // predicates.ClusterControlPlaneInitialized(r.Log), // ) -func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { +func ClusterControlPlaneInitialized(logger logr.Logger) predicate.ObjectPredicate[*clusterv1.Cluster] { + return predicate.ObjectFuncs[*clusterv1.Cluster]{ + UpdateFunc: func(oldCluster, newCluster *clusterv1.Cluster) bool { log := logger.WithValues("predicate", "ClusterControlPlaneInitialized", "eventType", "update") - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } log = log.WithValues("Cluster", klog.KObj(oldCluster)) - newCluster := e.ObjectNew.(*clusterv1.Cluster) - if !conditions.IsTrue(oldCluster, clusterv1.ControlPlaneInitializedCondition) && conditions.IsTrue(newCluster, clusterv1.ControlPlaneInitializedCondition) { log.V(6).Info("Cluster ControlPlaneInitialized was set, allow further processing") @@ -200,9 +148,6 @@ func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { log.V(6).Info("Cluster ControlPlaneInitialized hasn't changed, blocking further processing") return false }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, } } @@ -218,52 +163,35 @@ func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { // handler.EnqueueRequestsFromMapFunc(clusterToMachines) // predicates.ClusterUnpausedAndInfrastructureReady(r.Log), // ) -func ClusterUnpausedAndInfrastructureReady(logger logr.Logger) predicate.Funcs { +func ClusterUnpausedAndInfrastructureReady(logger logr.Logger) predicate.ObjectPredicate[*clusterv1.Cluster] { log := logger.WithValues("predicate", "ClusterUnpausedAndInfrastructureReady") // Only continue processing create events if both not paused and infrastructure is ready - createPredicates := All(log, ClusterCreateNotPaused(log), ClusterCreateInfraReady(log)) + createPredicates := predicate.All(ClusterCreateNotPaused(log), ClusterCreateInfraReady(log)) // Process update events if either Cluster is unpaused or infrastructure becomes ready - updatePredicates := Any(log, ClusterUpdateUnpaused(log), ClusterUpdateInfraReady(log)) + updatePredicates := predicate.Any(ClusterUpdateUnpaused(log), ClusterUpdateInfraReady(log)) // Use any to ensure we process either create or update events we care about - return Any(log, createPredicates, updatePredicates) + return predicate.Any(createPredicates, updatePredicates) } // ClusterHasTopology returns a Predicate that returns true when cluster.Spec.Topology // is NOT nil and false otherwise. 
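ClusterUnpaused and ClusterUnpausedAndInfrastructureReady now compose through the fork's generic predicate.All / predicate.Any rather than the logger-taking local All / Any helpers, and composition is only possible between predicates of the same object type. Restated in isolation as a sketch alongside the typed Cluster predicates defined above (the function name is illustrative):

	// unpausedOrBecameReady composes typed Cluster predicates; All and Any here
	// are the generic helpers from the forked controller-runtime, not the local
	// wrappers in generic_predicates.go.
	func unpausedOrBecameReady(log logr.Logger) predicate.ObjectPredicate[*clusterv1.Cluster] {
		createPredicates := predicate.All(ClusterCreateNotPaused(log), ClusterCreateInfraReady(log))
		updatePredicates := predicate.Any(ClusterUpdateUnpaused(log), ClusterUpdateInfraReady(log))
		return predicate.Any(createPredicates, updatePredicates)
	}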
-func ClusterHasTopology(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - return processIfTopologyManaged(logger.WithValues("predicate", "ClusterHasTopology", "eventType", "update"), e.ObjectNew) - }, - CreateFunc: func(e event.CreateEvent) bool { - return processIfTopologyManaged(logger.WithValues("predicate", "ClusterHasTopology", "eventType", "create"), e.Object) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return processIfTopologyManaged(logger.WithValues("predicate", "ClusterHasTopology", "eventType", "delete"), e.Object) - }, - GenericFunc: func(e event.GenericEvent) bool { - return processIfTopologyManaged(logger.WithValues("predicate", "ClusterHasTopology", "eventType", "generic"), e.Object) - }, - } +func ClusterHasTopology(logger logr.Logger) predicate.ObjectPredicate[*clusterv1.Cluster] { + return predicate.NewObjectPredicateFuncs(processIfTopologyManaged(logger.WithValues("predicate", "ClusterHasTopology"))) } -func processIfTopologyManaged(logger logr.Logger, object client.Object) bool { - cluster, ok := object.(*clusterv1.Cluster) - if !ok { - logger.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", object)) - return false - } +func processIfTopologyManaged(logger logr.Logger) func(*clusterv1.Cluster) bool { + return func(cluster *clusterv1.Cluster) bool { + log := logger.WithValues("Cluster", klog.KObj(cluster)) - log := logger.WithValues("Cluster", klog.KObj(cluster)) + if cluster.Spec.Topology != nil { + log.V(6).Info("Cluster has topology, allowing further processing") + return true + } - if cluster.Spec.Topology != nil { - log.V(6).Info("Cluster has topology, allowing further processing") - return true + log.V(6).Info("Cluster does not have topology, blocking further processing") + return false } - - log.V(6).Info("Cluster does not have topology, blocking further processing") - return false } diff --git a/util/predicates/cluster_predicates_test.go b/util/predicates/cluster_predicates_test.go index 253c1cbcc0e6..e0919c20d2fb 100644 --- a/util/predicates/cluster_predicates_test.go +++ b/util/predicates/cluster_predicates_test.go @@ -21,7 +21,6 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -88,12 +87,7 @@ func TestClusterControlplaneInitializedPredicate(t *testing.T) { for i := range testcases { tc := testcases[i] t.Run(tc.name, func(*testing.T) { - ev := event.UpdateEvent{ - ObjectOld: &tc.oldCluster, - ObjectNew: &tc.newCluster, - } - - g.Expect(predicate.Update(ev)).To(Equal(tc.expected)) + g.Expect(predicate.OnUpdate(&tc.oldCluster, &tc.newCluster)).To(Equal(tc.expected)) }) } } diff --git a/util/predicates/generic_predicates.go b/util/predicates/generic_predicates.go index ccce5de6d1da..121db8112940 100644 --- a/util/predicates/generic_predicates.go +++ b/util/predicates/generic_predicates.go @@ -130,21 +130,8 @@ func Any(logger logr.Logger, predicates ...predicate.Funcs) predicate.Funcs { // ResourceHasFilterLabel returns a predicate that returns true only if the provided resource contains // a label with the WatchLabel key and the configured label value exactly. 
-func ResourceHasFilterLabel(logger logr.Logger, labelValue string) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - return processIfLabelMatch(logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "update"), e.ObjectNew, labelValue) - }, - CreateFunc: func(e event.CreateEvent) bool { - return processIfLabelMatch(logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "create"), e.Object, labelValue) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return processIfLabelMatch(logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "delete"), e.Object, labelValue) - }, - GenericFunc: func(e event.GenericEvent) bool { - return processIfLabelMatch(logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "generic"), e.Object, labelValue) - }, - } +func ResourceHasFilterLabel[T client.Object](logger logr.Logger, labelValue string, _ T) predicate.ObjectFuncs[T] { + return predicate.NewObjectPredicateFuncs(processIfLabelMatch[T](logger.WithValues("predicate", "ResourceHasFilterLabel"), labelValue)) } // ResourceNotPaused returns a Predicate that returns true only if the provided resource does not contain the @@ -161,54 +148,45 @@ func ResourceHasFilterLabel(logger logr.Logger, labelValue string) predicate.Fun // Build(r) // return err // } -func ResourceNotPaused(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - return processIfNotPaused(logger.WithValues("predicate", "ResourceNotPaused", "eventType", "update"), e.ObjectNew) - }, - CreateFunc: func(e event.CreateEvent) bool { - return processIfNotPaused(logger.WithValues("predicate", "ResourceNotPaused", "eventType", "create"), e.Object) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return processIfNotPaused(logger.WithValues("predicate", "ResourceNotPaused", "eventType", "delete"), e.Object) - }, - GenericFunc: func(e event.GenericEvent) bool { - return processIfNotPaused(logger.WithValues("predicate", "ResourceNotPaused", "eventType", "generic"), e.Object) - }, - } +func ResourceNotPaused[T client.Object](logger logr.Logger, _ T) predicate.ObjectFuncs[T] { + return predicate.NewObjectPredicateFuncs(processIfNotPaused[T](logger.WithValues("predicate", "ResourceNotPaused"))) } // ResourceNotPausedAndHasFilterLabel returns a predicate that returns true only if the // ResourceNotPaused and ResourceHasFilterLabel predicates return true. 
-func ResourceNotPausedAndHasFilterLabel(logger logr.Logger, labelValue string) predicate.Funcs { - return All(logger, ResourceNotPaused(logger), ResourceHasFilterLabel(logger, labelValue)) +func ResourceNotPausedAndHasFilterLabel[T client.Object](logger logr.Logger, labelValue string, o T) predicate.ObjectPredicate[T] { + return predicate.All(ResourceNotPaused(logger, o), ResourceHasFilterLabel(logger, labelValue, o)) } -func processIfNotPaused(logger logr.Logger, obj client.Object) bool { - kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) - log := logger.WithValues("namespace", obj.GetNamespace(), kind, obj.GetName()) - if annotations.HasPaused(obj) { - log.V(4).Info("Resource is paused, will not attempt to map resource") - return false +func processIfNotPaused[T client.Object](logger logr.Logger) func(T) bool { + return func(obj T) bool { + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + log := logger.WithValues("namespace", obj.GetNamespace(), kind, obj.GetName()) + if annotations.HasPaused(obj) { + log.V(4).Info("Resource is paused, will not attempt to map resource") + return false + } + log.V(6).Info("Resource is not paused, will attempt to map resource") + return true } - log.V(6).Info("Resource is not paused, will attempt to map resource") - return true } -func processIfLabelMatch(logger logr.Logger, obj client.Object, labelValue string) bool { - // Return early if no labelValue was set. - if labelValue == "" { - return true - } +func processIfLabelMatch[T client.Object](logger logr.Logger, labelValue string) func(T) bool { + return func(obj T) bool { + // Return early if no labelValue was set. + if labelValue == "" { + return true + } - kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) - log := logger.WithValues("namespace", obj.GetNamespace(), kind, obj.GetName()) - if labels.HasWatchLabel(obj, labelValue) { - log.V(6).Info("Resource matches label, will attempt to map resource") - return true + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + log := logger.WithValues("namespace", obj.GetNamespace(), kind, obj.GetName()) + if labels.HasWatchLabel(obj, labelValue) { + log.V(6).Info("Resource matches label, will attempt to map resource") + return true + } + log.V(4).Info("Resource does not match label, will not attempt to map resource") + return false } - log.V(4).Info("Resource does not match label, will not attempt to map resource") - return false } // ResourceIsNotExternallyManaged returns a predicate that returns true only if the resource does not contain @@ -245,32 +223,21 @@ func processIfNotExternallyManaged(logger logr.Logger, obj client.Object) bool { // ResourceIsTopologyOwned returns a predicate that returns true only if the resource has // the `topology.cluster.x-k8s.io/owned` label. 
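The generic predicates in this file now share one recipe: a [T client.Object] type parameter inferred from a trailing type-witness argument, plus a curried check lifted over every event kind via predicate.NewObjectPredicateFuncs. A hypothetical predicate written in the same style, purely for illustration (ResourceHasAnnotation is not part of this PR):

	// ResourceHasAnnotation returns true when the object carries the given
	// annotation key; the unused final argument exists only so T can be inferred,
	// matching the convention of the predicates above.
	func ResourceHasAnnotation[T client.Object](logger logr.Logger, key string, _ T) predicate.ObjectFuncs[T] {
		return predicate.NewObjectPredicateFuncs(func(obj T) bool {
			if _, ok := obj.GetAnnotations()[key]; ok {
				logger.V(6).Info("Resource has annotation, will attempt to map resource", "annotation", key)
				return true
			}
			logger.V(4).Info("Resource does not have annotation, will not attempt to map resource", "annotation", key)
			return false
		})
	}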
-func ResourceIsTopologyOwned(logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - return processIfTopologyOwned(logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "update"), e.ObjectNew) - }, - CreateFunc: func(e event.CreateEvent) bool { - return processIfTopologyOwned(logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "create"), e.Object) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return processIfTopologyOwned(logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "delete"), e.Object) - }, - GenericFunc: func(e event.GenericEvent) bool { - return processIfTopologyOwned(logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "generic"), e.Object) - }, - } +func ResourceIsTopologyOwned[T client.Object](logger logr.Logger, _ T) predicate.ObjectFuncs[T] { + return predicate.NewObjectPredicateFuncs(processIfTopologyOwned[T](logger.WithValues("predicate", "ResourceIsTopologyOwned"))) } -func processIfTopologyOwned(logger logr.Logger, obj client.Object) bool { - kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) - log := logger.WithValues("namespace", obj.GetNamespace(), kind, obj.GetName()) - if labels.IsTopologyOwned(obj) { - log.V(6).Info("Resource is topology owned, will attempt to map resource") - return true +func processIfTopologyOwned[T client.Object](logger logr.Logger) func(T) bool { + return func(obj T) bool { + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + log := logger.WithValues("namespace", obj.GetNamespace(), kind, obj.GetName()) + if labels.IsTopologyOwned(obj) { + log.V(6).Info("Resource is topology owned, will attempt to map resource") + return true + } + // We intentionally log this line only on level 6, because it will be very frequently + // logged for MachineDeployments and MachineSets not owned by a topology. + log.V(6).Info("Resource is not topology owned, will not attempt to map resource") + return false } - // We intentionally log this line only on level 6, because it will be very frequently - // logged for MachineDeployments and MachineSets not owned by a topology. - log.V(6).Info("Resource is not topology owned, will not attempt to map resource") - return false } diff --git a/util/util.go b/util/util.go index 1648cb3d1256..a274e1fd36d1 100644 --- a/util/util.go +++ b/util/util.go @@ -197,14 +197,9 @@ func ObjectKey(object metav1.Object) client.ObjectKey { // ClusterToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for // Cluster events and returns reconciliation requests for an infrastructure provider object. -func ClusterToInfrastructureMapFunc(ctx context.Context, gvk schema.GroupVersionKind, c client.Client, providerCluster client.Object) handler.MapFunc { +func ClusterToInfrastructureMapFunc(ctx context.Context, gvk schema.GroupVersionKind, c client.Client, providerCluster client.Object) handler.ObjectMapFunc[*clusterv1.Cluster] { log := ctrl.LoggerFrom(ctx) - return func(ctx context.Context, o client.Object) []reconcile.Request { - cluster, ok := o.(*clusterv1.Cluster) - if !ok { - return nil - } - + return func(ctx context.Context, cluster *clusterv1.Cluster) []reconcile.Request { // Return early if the InfrastructureRef is nil. 
if cluster.Spec.InfrastructureRef == nil { return nil @@ -265,13 +260,8 @@ func GetMachineByName(ctx context.Context, c client.Client, namespace, name stri // MachineToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for // Machine events and returns reconciliation requests for an infrastructure provider object. -func MachineToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc { - return func(_ context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*clusterv1.Machine) - if !ok { - return nil - } - +func MachineToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.ObjectMapFunc[*clusterv1.Machine] { + return func(_ context.Context, m *clusterv1.Machine) []reconcile.Request { gk := gvk.GroupKind() // Return early if the GroupKind doesn't match what we expect. infraGK := m.Spec.InfrastructureRef.GroupVersionKind().GroupKind() @@ -473,7 +463,7 @@ func (k KubeAwareAPIVersions) Less(i, j int) bool { // Note: This function uses the passed in typed ObjectList and thus with the default client configuration all list calls // will be cached. // NB: The objects are required to have `clusterv1.ClusterNameLabel` applied. -func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { +func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.ObjectMapFunc[*clusterv1.Cluster], error) { gvk, err := apiutil.GVKForObject(ro, scheme) if err != nil { return nil, err @@ -495,12 +485,7 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme * return nil, err } - return func(ctx context.Context, o client.Object) []ctrl.Request { - cluster, ok := o.(*clusterv1.Cluster) - if !ok { - return nil - } - + return func(ctx context.Context, cluster *clusterv1.Cluster) []ctrl.Request { listOpts := []client.ListOption{ client.MatchingLabels{ clusterv1.ClusterNameLabel: cluster.Name, @@ -537,7 +522,7 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme * // MachineDeploymentToObjectsMapper returns a mapper function that gets a machinedeployment // and lists all objects for the object passed in and returns a list of requests. // NB: The objects are required to have `clusterv1.MachineDeploymentNameLabel` applied. -func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { +func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.ObjectMapFunc[*clusterv1.MachineDeployment], error) { gvk, err := apiutil.GVKForObject(ro, scheme) if err != nil { return nil, err @@ -559,12 +544,7 @@ func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, sch return nil, err } - return func(ctx context.Context, o client.Object) []ctrl.Request { - md, ok := o.(*clusterv1.MachineDeployment) - if !ok { - return nil - } - + return func(ctx context.Context, md *clusterv1.MachineDeployment) []ctrl.Request { listOpts := []client.ListOption{ client.MatchingLabels{ clusterv1.MachineDeploymentNameLabel: md.Name, @@ -601,7 +581,7 @@ func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, sch // MachineSetToObjectsMapper returns a mapper function that gets a machineset // and lists all objects for the object passed in and returns a list of requests. // NB: The objects are required to have `clusterv1.MachineSetNameLabel` applied. 
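The util mappers above now return the fork's typed handler.ObjectMapFunc[T] instead of handler.MapFunc, so the produced function plugs straight into EnqueueRequestsFromObjectMap with no client.Object indirection. A call-site sketch under that assumption; mgr and the surrounding error handling are placeholders, and this mirrors how a mapper such as clusterToMachineSets is presumably built during SetupWithManager:

	// Build a typed Cluster -> MachineSet mapper once during controller setup...
	clusterToMachineSets, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &clusterv1.MachineSetList{}, mgr.GetScheme())
	if err != nil {
		return err
	}
	// ...and hand it to a typed watch; clusterToMachineSets is a
	// handler.ObjectMapFunc[*clusterv1.Cluster], so no adapter is needed:
	//
	//	Add(builder.Watches(mgr,
	//		&clusterv1.Cluster{},
	//		handler.EnqueueRequestsFromObjectMap(clusterToMachineSets),
	//		predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)),
	//	))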
-func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { +func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.ObjectMapFunc[*clusterv1.MachineSet], error) { gvk, err := apiutil.GVKForObject(ro, scheme) if err != nil { return nil, err @@ -623,12 +603,7 @@ func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *ru return nil, err } - return func(ctx context.Context, o client.Object) []ctrl.Request { - ms, ok := o.(*clusterv1.MachineSet) - if !ok { - return nil - } - + return func(ctx context.Context, ms *clusterv1.MachineSet) []ctrl.Request { listOpts := []client.ListOption{ client.MatchingLabels{ clusterv1.MachineSetNameLabel: format.MustFormatValue(ms.Name), diff --git a/util/util_test.go b/util/util_test.go index cf2616e97ed7..b230ece55e4b 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -45,7 +45,7 @@ func TestMachineToInfrastructureMapFunc(t *testing.T) { testcases := []struct { name string input schema.GroupVersionKind - request client.Object + request *clusterv1.Machine output []reconcile.Request }{ {