diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index 3c0072f86ec1..ce55499b01d4 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -28,7 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/klog/v2/klogr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -76,9 +76,8 @@ func TestClusterCacheHealthCheck(t *testing.T) { k8sClient = mgr.GetClient() t.Log("Setting up a ClusterCacheTracker") - log := klogr.New() cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{ - Log: &log, + Log: &ctrl.Log, Indexes: []Index{NodeProviderIDIndex}, }) g.Expect(err).ToNot(HaveOccurred()) diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 376e3ceff05f..ace8ff12ee53 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -96,8 +96,8 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains { } // MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it. -func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) { - fd := c.FailureDomainWithMostMachines(machines) +func (c *ControlPlane) MachineInFailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) (*clusterv1.Machine, error) { + fd := c.FailureDomainWithMostMachines(ctx, machines) machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd)) machineToMark := machinesInFailureDomain.Oldest() if machineToMark == nil { @@ -116,7 +116,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines // FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most // control-plane machines on it. -func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string { +func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) *string { // See if there are any Machines that are not in currently defined failure domains first. notInFailureDomains := machines.Filter( collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)), @@ -127,15 +127,15 @@ func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machin // in the cluster status. return notInFailureDomains.Oldest().Spec.FailureDomain } - return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) + return failuredomains.PickMost(ctx, c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) } // NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines. 
-func (c *ControlPlane) NextFailureDomainForScaleUp() *string { +func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) *string { if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 { return nil } - return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines()) + return failuredomains.PickFewest(ctx, c.FailureDomains().FilterControlPlane(), c.UpToDateMachines()) } // InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane. diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index a8beb42264d9..b68fba77d5c7 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -53,12 +53,12 @@ func TestControlPlane(t *testing.T) { } t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) { - g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("two")) + g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two")) }) t.Run("With some machines in non defined failure domains", func(t *testing.T) { controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown"))) - g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("unknown")) + g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown")) }) }) } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 5975bf8dc5c4..b2a4e490a583 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -37,13 +37,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -1906,7 +1904,6 @@ kubernetesVersion: metav1.16.1`, kubeadmCM.DeepCopy(), } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := &fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -1964,7 +1961,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -2010,7 +2006,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -2071,7 +2066,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) 
- log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 66608e60ae61..f1b0e659c873 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -39,7 +39,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte logger := ctrl.LoggerFrom(ctx) bootstrapSpec := controlPlane.InitialControlPlaneConfig() - fd := controlPlane.NextFailureDomainForScaleUp() + fd := controlPlane.NextFailureDomainForScaleUp(ctx) if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil { logger.Error(err, "Failed to create initial control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) @@ -60,7 +60,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, // Create the bootstrap configuration bootstrapSpec := controlPlane.JoinControlPlaneConfig() - fd := controlPlane.NextFailureDomainForScaleUp() + fd := controlPlane.NextFailureDomainForScaleUp(ctx) if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil { logger.Error(err, "Failed to create additional control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err) @@ -79,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( logger := ctrl.LoggerFrom(ctx) // Pick the Machine that we should scale down. 
- machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines) + machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") } @@ -223,7 +223,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust return nil } -func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { +func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { machines := controlPlane.Machines switch { case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0: @@ -233,5 +233,5 @@ func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMach case outdatedMachines.Len() > 0: machines = outdatedMachines } - return controlPlane.MachineInFailureDomainWithMostMachines(machines) + return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines) } diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 2ef540db42d4..b2f3267a6061 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -464,7 +464,7 @@ func TestSelectMachineForScaleDown(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines) + selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines) if tc.expectErr { g.Expect(err).To(HaveOccurred()) diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index fbd75c926eb2..833d0f717e42 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -24,10 +24,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -72,7 +70,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -145,7 +142,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -219,7 +215,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -302,7 +297,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing objs = append(objs, n, m, kubeadmConfigMap()) machines[m.Name] = m fakeClient := newFakeClient(objs...) 
- log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -383,7 +377,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) // Set all the machines to `not ready` r := &KubeadmControlPlaneReconciler{ diff --git a/hack/tools/runtime-openapi-gen/main.go b/hack/tools/runtime-openapi-gen/main.go index 82ff98b7483e..346cd5ea4304 100644 --- a/hack/tools/runtime-openapi-gen/main.go +++ b/hack/tools/runtime-openapi-gen/main.go @@ -74,6 +74,6 @@ func main() { err = os.WriteFile(*outputFile, openAPIBytes, 0600) if err != nil { - klog.Exitf("Failed to write OpenAPI specification to file %q: %v", outputFile, err) + klog.Exitf("Failed to write OpenAPI specification to file %q: %v", *outputFile, err) } } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 2d5df9f852da..c3c75b54aa76 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -22,7 +22,6 @@ import ( "sort" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -150,7 +149,7 @@ func (r *Reconciler) updateMachineSet(ctx context.Context, deployment *clusterv1 log := ctrl.LoggerFrom(ctx) // Compute the desired MachineSet. - updatedMS, err := r.computeDesiredMachineSet(deployment, ms, oldMSs, log) + updatedMS, err := r.computeDesiredMachineSet(ctx, deployment, ms, oldMSs) if err != nil { return nil, errors.Wrapf(err, "failed to update MachineSet %q", klog.KObj(ms)) } @@ -172,7 +171,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl log := ctrl.LoggerFrom(ctx) // Compute the desired MachineSet. - newMS, err := r.computeDesiredMachineSet(deployment, nil, oldMSs, log) + newMS, err := r.computeDesiredMachineSet(ctx, deployment, nil, oldMSs) if err != nil { return nil, errors.Wrap(err, "failed to create new MachineSet") } @@ -213,7 +212,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl // There are small differences in how we calculate the MachineSet depending on if it // is a create or update. Example: for a new MachineSet we have to calculate a new name, // while for an existing MachineSet we have to use the name of the existing MachineSet. -func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, log logr.Logger) (*clusterv1.MachineSet, error) { +func (r *Reconciler) computeDesiredMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { var name string var uid types.UID var finalizers []string @@ -328,7 +327,7 @@ func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeplo desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) // Set annotations and .spec.template.annotations. 
- if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(log, deployment, oldMSs, existingMS); err != nil { + if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(ctx, deployment, oldMSs, existingMS); err != nil { return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute annotations") } desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index d8a99f12cb13..e481573a5ce8 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -30,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/types" apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -555,8 +554,6 @@ func TestComputeDesiredMachineSet(t *testing.T) { }, } - log := klogr.New() - skeletonMSBasedOnMD := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -577,7 +574,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS := skeletonMSBasedOnMD.DeepCopy() g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, nil, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, nil) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -590,7 +587,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.Replicas = ptr.To[int32](2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2 g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, []*clusterv1.MachineSet{oldMS}, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, []*clusterv1.MachineSet{oldMS}) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -627,7 +624,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -668,7 +665,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, []*clusterv1.MachineSet{oldMS}, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, []*clusterv1.MachineSet{oldMS}) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -714,7 +711,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.DeletePolicy = "" g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) diff --git a/internal/controllers/machinedeployment/mdutil/util.go 
b/internal/controllers/machinedeployment/mdutil/util.go index 772654333286..a8624ac2dade 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -18,6 +18,7 @@ limitations under the License. package mdutil import ( + "context" "fmt" "sort" "strconv" @@ -33,6 +34,7 @@ import ( intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" "k8s.io/utils/integer" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conversion" @@ -114,13 +116,14 @@ func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision str } // MaxRevision finds the highest revision in the machine sets. -func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 { +func MaxRevision(ctx context.Context, allMSs []*clusterv1.MachineSet) int64 { + log := ctrl.LoggerFrom(ctx) + max := int64(0) for _, ms := range allMSs { if v, err := Revision(ms); err != nil { // Skip the machine sets when it failed to parse their revision information - logger.Error(err, "Couldn't parse revision for machine set, deployment controller will skip it when reconciling revisions", - "machineset", ms.Name) + log.Error(err, fmt.Sprintf("Couldn't parse revision for MachineSet %s, deployment controller will skip it when reconciling revisions", ms.Name)) } else if v > max { max = v } @@ -185,7 +188,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger // ComputeMachineSetAnnotations computes the annotations that should be set on the MachineSet. // Note: The passed in newMS is nil if the new MachineSet doesn't exist in the apiserver yet. -func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) { +func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) { // Copy annotations from Deployment annotations while filtering out some annotations // that we don't want to propagate. annotations := map[string]string{} @@ -199,7 +202,7 @@ func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.Machine // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number // of all old MSes + 1). However, it's possible that some old MSes are deleted after the newMS revision being updated, and // newRevision becomes smaller than newMS's revision. We will never decrease a revision of a MachineSet. 
- maxOldRevision := MaxRevision(oldMSs, log) + maxOldRevision := MaxRevision(ctx, oldMSs) newRevisionInt := maxOldRevision + 1 newRevision := strconv.FormatInt(newRevisionInt, 10) if newMS != nil { diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index 3062c1626461..d4ba8c0b06b0 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -30,12 +30,16 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +var ( + ctx = ctrl.SetupSignalHandler() +) + func newDControllerRef(md *clusterv1.MachineDeployment) *metav1.OwnerReference { isController := true return &metav1.OwnerReference{ @@ -939,11 +943,10 @@ func TestComputeMachineSetAnnotations(t *testing.T) { }, } - log := klogr.New() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := ComputeMachineSetAnnotations(log, tt.deployment, tt.oldMSs, tt.ms) + got, err := ComputeMachineSetAnnotations(ctx, tt.deployment, tt.oldMSs, tt.ms) if tt.wantErr { g.Expect(err).ShouldNot(HaveOccurred()) } else { diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go index 8f27660c6a16..a4ab7382614e 100644 --- a/internal/test/envtest/environment.go +++ b/internal/test/envtest/environment.go @@ -42,7 +42,6 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" - "k8s.io/klog/v2/klogr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -72,11 +71,10 @@ import ( ) func init() { - klog.InitFlags(nil) - logger := klogr.New() + logger := klog.Background() // Use klog as the internal logger for this envtest environment. log.SetLogger(logger) - // Additionally force all of the controllers to use the Ginkgo logger. + // Additionally force all controllers to use the Ginkgo logger. ctrl.SetLogger(logger) // Add logger for ginkgo. klog.SetOutput(ginkgo.GinkgoWriter) diff --git a/util/failuredomains/failure_domains.go b/util/failuredomains/failure_domains.go index 6ef83615a0c3..50b3d87a604c 100644 --- a/util/failuredomains/failure_domains.go +++ b/util/failuredomains/failure_domains.go @@ -18,10 +18,12 @@ limitations under the License. package failuredomains import ( + "context" + "fmt" "sort" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" @@ -50,9 +52,9 @@ func (f failureDomainAggregations) Swap(i, j int) { } // PickMost returns a failure domain that is in machines and has most of the group of machines on. -func PickMost(failureDomains clusterv1.FailureDomains, groupMachines, machines collections.Machines) *string { +func PickMost(ctx context.Context, failureDomains clusterv1.FailureDomains, groupMachines, machines collections.Machines) *string { // orderDescending sorts failure domains according to all machines belonging to the group. 
- fds := orderDescending(failureDomains, groupMachines) + fds := orderDescending(ctx, failureDomains, groupMachines) for _, fd := range fds { for _, m := range machines { if m.Spec.FailureDomain == nil { @@ -67,8 +69,8 @@ func PickMost(failureDomains clusterv1.FailureDomains, groupMachines, machines c } // orderDescending returns the sorted failure domains in decreasing order. -func orderDescending(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { - aggregations := pick(failureDomains, machines) +func orderDescending(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { + aggregations := pick(ctx, failureDomains, machines) if len(aggregations) == 0 { return nil } @@ -77,8 +79,8 @@ func orderDescending(failureDomains clusterv1.FailureDomains, machines collectio } // PickFewest returns the failure domain with the fewest number of machines. -func PickFewest(failureDomains clusterv1.FailureDomains, machines collections.Machines) *string { - aggregations := pick(failureDomains, machines) +func PickFewest(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) *string { + aggregations := pick(ctx, failureDomains, machines) if len(aggregations) == 0 { return nil } @@ -86,7 +88,9 @@ func PickFewest(failureDomains clusterv1.FailureDomains, machines collections.Ma return ptr.To(aggregations[0].id) } -func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { +func pick(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { + log := ctrl.LoggerFrom(ctx) + if len(failureDomains) == 0 { return failureDomainAggregations{} } @@ -105,7 +109,11 @@ func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines } id := *m.Spec.FailureDomain if _, ok := failureDomains[id]; !ok { - klogr.New().Info("unknown failure domain", "machine-name", m.GetName(), "failure-domain-id", id, "known-failure-domains", failureDomains) + var knownFailureDomains []string + for failureDomainID := range failureDomains { + knownFailureDomains = append(knownFailureDomains, failureDomainID) + } + log.Info(fmt.Sprintf("Unknown failure domain %q for Machine %s (known failure domains: %v)", id, m.GetName(), knownFailureDomains)) continue } counters[id]++ diff --git a/util/failuredomains/failure_domains_test.go b/util/failuredomains/failure_domains_test.go index 12a4eb7cdc3b..e49c850087bf 100644 --- a/util/failuredomains/failure_domains_test.go +++ b/util/failuredomains/failure_domains_test.go @@ -21,11 +21,16 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" ) +var ( + ctx = ctrl.SetupSignalHandler() +) + func TestNewFailureDomainPicker(t *testing.T) { a := ptr.To("us-west-1a") b := ptr.To("us-west-1b") @@ -87,7 +92,7 @@ func TestNewFailureDomainPicker(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - fd := PickFewest(tc.fds, tc.machines) + fd := PickFewest(ctx, tc.fds, tc.machines) if tc.expected == nil { g.Expect(fd).To(BeNil()) } else { @@ -167,7 +172,7 @@ func TestNewFailureDomainPickMost(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - fd := PickMost(tc.fds, tc.machines, tc.machines) + fd := PickMost(ctx, tc.fds, tc.machines, tc.machines) if tc.expected == nil { g.Expect(fd).To(BeNil()) } else {