diff --git a/bootstrap/kubeadm/main.go b/bootstrap/kubeadm/main.go index 5ae2aeff4aff..8f39d94aa4fb 100644 --- a/bootstrap/kubeadm/main.go +++ b/bootstrap/kubeadm/main.go @@ -168,7 +168,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/bootstrap/util/configowner.go b/bootstrap/util/configowner.go index 1729629bc81e..7ed7e2edbf24 100644 --- a/bootstrap/util/configowner.go +++ b/bootstrap/util/configowner.go @@ -191,7 +191,7 @@ func GetTypedOwnerByRef(ctx context.Context, c client.Client, ref *corev1.Object if err != nil { return nil, errors.Wrapf(err, "failed to construct object of type %s", ref.GroupVersionKind()) } - metaObj, ok := obj.(client.Object) + clientObj, ok := obj.(client.Object) if !ok { return nil, errors.Errorf("expected owner reference to refer to a client.Object, is actually %T", obj) } @@ -199,12 +199,12 @@ func GetTypedOwnerByRef(ctx context.Context, c client.Client, ref *corev1.Object Namespace: ref.Namespace, Name: ref.Name, } - err = c.Get(ctx, key, metaObj) + err = c.Get(ctx, key, clientObj) if err != nil { return nil, err } - content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(metaObj) + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(clientObj) if err != nil { return nil, err } diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go index 7c884403be4f..7263e9513bd6 100644 --- a/bootstrap/util/configowner_test.go +++ b/bootstrap/util/configowner_test.go @@ -184,7 +184,8 @@ func TestHasNodeRefs(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ - Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -205,7 +206,8 @@ func TestHasNodeRefs(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ - Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "machine-name", @@ -237,7 +239,8 @@ func TestHasNodeRefs(t *testing.T) { { // No replicas specified (default is 1). No nodeRefs either. 
TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -247,7 +250,8 @@ func TestHasNodeRefs(t *testing.T) { { // 1 replica but no nodeRefs TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -260,7 +264,8 @@ func TestHasNodeRefs(t *testing.T) { { // 2 replicas but only 1 nodeRef TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -300,7 +305,8 @@ func TestHasNodeRefs(t *testing.T) { { // 1 replica (default) and 1 nodeRef TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -319,7 +325,8 @@ func TestHasNodeRefs(t *testing.T) { { // 2 replicas and nodeRefs TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, @@ -346,7 +353,8 @@ func TestHasNodeRefs(t *testing.T) { { // 0 replicas and 0 nodeRef TypeMeta: metav1.TypeMeta{ - Kind: "MachinePool", + APIVersion: expv1.GroupVersion.String(), + Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ Namespace: metav1.NamespaceDefault, diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 7316dcf83300..ca18608d60e4 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -1315,6 +1315,9 @@ func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.M return nil } +// applyMutators applies mutators to an object. +// Note: TypeMeta must always be set in the object because otherwise after conversion the +// resulting Unstructured would have an empty GVK. 
func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) { if object == nil { return nil, nil diff --git a/cmd/clusterctl/client/upgrade_test.go b/cmd/clusterctl/client/upgrade_test.go index 9b8ac75790f4..c5512807bbfe 100644 --- a/cmd/clusterctl/client/upgrade_test.go +++ b/cmd/clusterctl/client/upgrade_test.go @@ -181,10 +181,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ // both providers should be upgraded fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"), @@ -209,10 +205,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ // only one provider should be upgraded fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"), @@ -237,10 +229,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ // only one provider should be upgraded fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"), @@ -265,10 +253,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) { }, }, wantProviders: &clusterctlv1.ProviderList{ - TypeMeta: metav1.TypeMeta{ - APIVersion: clusterctlv1.GroupVersion.String(), - Kind: "ProviderList", - }, ListMeta: metav1.ListMeta{}, Items: []clusterctlv1.Provider{ fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"), diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index 3c0072f86ec1..ce55499b01d4 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -28,7 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/klog/v2/klogr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -76,9 +76,8 @@ func TestClusterCacheHealthCheck(t *testing.T) { k8sClient = mgr.GetClient() t.Log("Setting up a ClusterCacheTracker") - log := klogr.New() cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{ - Log: &log, + Log: &ctrl.Log, Indexes: []Index{NodeProviderIDIndex}, }) g.Expect(err).ToNot(HaveOccurred()) diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 376e3ceff05f..ace8ff12ee53 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -96,8 +96,8 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains { } // MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it. 
-func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) { - fd := c.FailureDomainWithMostMachines(machines) +func (c *ControlPlane) MachineInFailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) (*clusterv1.Machine, error) { + fd := c.FailureDomainWithMostMachines(ctx, machines) machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd)) machineToMark := machinesInFailureDomain.Oldest() if machineToMark == nil { @@ -116,7 +116,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines // FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most // control-plane machines on it. -func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string { +func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) *string { // See if there are any Machines that are not in currently defined failure domains first. notInFailureDomains := machines.Filter( collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)), @@ -127,15 +127,15 @@ func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machin // in the cluster status. return notInFailureDomains.Oldest().Spec.FailureDomain } - return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) + return failuredomains.PickMost(ctx, c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines) } // NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines. -func (c *ControlPlane) NextFailureDomainForScaleUp() *string { +func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) *string { if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 { return nil } - return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines()) + return failuredomains.PickFewest(ctx, c.FailureDomains().FilterControlPlane(), c.UpToDateMachines()) } // InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane. 
diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go index a8beb42264d9..b68fba77d5c7 100644 --- a/controlplane/kubeadm/internal/control_plane_test.go +++ b/controlplane/kubeadm/internal/control_plane_test.go @@ -53,12 +53,12 @@ func TestControlPlane(t *testing.T) { } t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) { - g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("two")) + g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two")) }) t.Run("With some machines in non defined failure domains", func(t *testing.T) { controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown"))) - g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("unknown")) + g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown")) }) }) } diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 5975bf8dc5c4..b2a4e490a583 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -37,13 +37,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -1906,7 +1904,6 @@ kubernetesVersion: metav1.16.1`, kubeadmCM.DeepCopy(), } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := &fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -1964,7 +1961,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -2010,7 +2006,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ @@ -2071,7 +2066,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient(objs...) 
- log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 66608e60ae61..f1b0e659c873 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -39,7 +39,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte logger := ctrl.LoggerFrom(ctx) bootstrapSpec := controlPlane.InitialControlPlaneConfig() - fd := controlPlane.NextFailureDomainForScaleUp() + fd := controlPlane.NextFailureDomainForScaleUp(ctx) if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil { logger.Error(err, "Failed to create initial control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) @@ -60,7 +60,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, // Create the bootstrap configuration bootstrapSpec := controlPlane.JoinControlPlaneConfig() - fd := controlPlane.NextFailureDomainForScaleUp() + fd := controlPlane.NextFailureDomainForScaleUp(ctx) if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil { logger.Error(err, "Failed to create additional control plane Machine") r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err) @@ -79,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( logger := ctrl.LoggerFrom(ctx) // Pick the Machine that we should scale down. 
- machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines) + machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down") } @@ -223,7 +223,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust return nil } -func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { +func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) { machines := controlPlane.Machines switch { case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0: @@ -233,5 +233,5 @@ func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMach case outdatedMachines.Len() > 0: machines = outdatedMachines } - return controlPlane.MachineInFailureDomainWithMostMachines(machines) + return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines) } diff --git a/controlplane/kubeadm/internal/controllers/scale_test.go b/controlplane/kubeadm/internal/controllers/scale_test.go index 2ef540db42d4..b2f3267a6061 100644 --- a/controlplane/kubeadm/internal/controllers/scale_test.go +++ b/controlplane/kubeadm/internal/controllers/scale_test.go @@ -464,7 +464,7 @@ func TestSelectMachineForScaleDown(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines) + selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines) if tc.expectErr { g.Expect(err).To(HaveOccurred()) diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index fbd75c926eb2..833d0f717e42 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -24,10 +24,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -72,7 +70,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -145,7 +142,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -219,7 +215,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -302,7 +297,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing objs = append(objs, n, m, kubeadmConfigMap()) machines[m.Name] = m fakeClient := newFakeClient(objs...) 
- log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -383,7 +377,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr } fakeClient := newFakeClient(objs...) - log.SetLogger(klogr.New()) // Set all the machines to `not ready` r := &KubeadmControlPlaneReconciler{ diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go index 2311d494379e..1ba3c7b01c33 100644 --- a/controlplane/kubeadm/main.go +++ b/controlplane/kubeadm/main.go @@ -171,7 +171,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md index 790fc1ab002d..275ed022a6d1 100644 --- a/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md +++ b/docs/book/src/tasks/experimental-features/runtime-sdk/implement-extensions.md @@ -97,7 +97,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/hack/tools/runtime-openapi-gen/main.go b/hack/tools/runtime-openapi-gen/main.go index 82ff98b7483e..346cd5ea4304 100644 --- a/hack/tools/runtime-openapi-gen/main.go +++ b/hack/tools/runtime-openapi-gen/main.go @@ -74,6 +74,6 @@ func main() { err = os.WriteFile(*outputFile, openAPIBytes, 0600) if err != nil { - klog.Exitf("Failed to write OpenAPI specification to file %q: %v", outputFile, err) + klog.Exitf("Failed to write OpenAPI specification to file %q: %v", *outputFile, err) } } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 2d5df9f852da..c3c75b54aa76 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -22,7 +22,6 @@ import ( "sort" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -150,7 +149,7 @@ func (r *Reconciler) updateMachineSet(ctx context.Context, deployment *clusterv1 log := ctrl.LoggerFrom(ctx) // Compute the desired MachineSet. - updatedMS, err := r.computeDesiredMachineSet(deployment, ms, oldMSs, log) + updatedMS, err := r.computeDesiredMachineSet(ctx, deployment, ms, oldMSs) if err != nil { return nil, errors.Wrapf(err, "failed to update MachineSet %q", klog.KObj(ms)) } @@ -172,7 +171,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl log := ctrl.LoggerFrom(ctx) // Compute the desired MachineSet. - newMS, err := r.computeDesiredMachineSet(deployment, nil, oldMSs, log) + newMS, err := r.computeDesiredMachineSet(ctx, deployment, nil, oldMSs) if err != nil { return nil, errors.Wrap(err, "failed to create new MachineSet") } @@ -213,7 +212,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl // There are small differences in how we calculate the MachineSet depending on if it // is a create or update. 
Example: for a new MachineSet we have to calculate a new name, // while for an existing MachineSet we have to use the name of the existing MachineSet. -func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, log logr.Logger) (*clusterv1.MachineSet, error) { +func (r *Reconciler) computeDesiredMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { var name string var uid types.UID var finalizers []string @@ -328,7 +327,7 @@ func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeplo desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) // Set annotations and .spec.template.annotations. - if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(log, deployment, oldMSs, existingMS); err != nil { + if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(ctx, deployment, oldMSs, existingMS); err != nil { return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute annotations") } desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index d8a99f12cb13..e481573a5ce8 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -30,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/types" apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -555,8 +554,6 @@ func TestComputeDesiredMachineSet(t *testing.T) { }, } - log := klogr.New() - skeletonMSBasedOnMD := &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -577,7 +574,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS := skeletonMSBasedOnMD.DeepCopy() g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, nil, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, nil) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -590,7 +587,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.Replicas = ptr.To[int32](2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2 g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, []*clusterv1.MachineSet{oldMS}, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, []*clusterv1.MachineSet{oldMS}) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -627,7 +624,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -668,7 +665,7 @@ func TestComputeDesiredMachineSet(t 
*testing.T) { expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, []*clusterv1.MachineSet{oldMS}, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, []*clusterv1.MachineSet{oldMS}) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) @@ -714,7 +711,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { expectedMS.Spec.DeletePolicy = "" g := NewWithT(t) - actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil) g.Expect(err).ToNot(HaveOccurred()) assertMachineSet(g, actualMS, expectedMS) }) diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index 772654333286..a8624ac2dade 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -18,6 +18,7 @@ limitations under the License. package mdutil import ( + "context" "fmt" "sort" "strconv" @@ -33,6 +34,7 @@ import ( intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/klog/v2" "k8s.io/utils/integer" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conversion" @@ -114,13 +116,14 @@ func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision str } // MaxRevision finds the highest revision in the machine sets. -func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 { +func MaxRevision(ctx context.Context, allMSs []*clusterv1.MachineSet) int64 { + log := ctrl.LoggerFrom(ctx) + max := int64(0) for _, ms := range allMSs { if v, err := Revision(ms); err != nil { // Skip the machine sets when it failed to parse their revision information - logger.Error(err, "Couldn't parse revision for machine set, deployment controller will skip it when reconciling revisions", - "machineset", ms.Name) + log.Error(err, fmt.Sprintf("Couldn't parse revision for MachineSet %s, deployment controller will skip it when reconciling revisions", ms.Name)) } else if v > max { max = v } @@ -185,7 +188,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger // ComputeMachineSetAnnotations computes the annotations that should be set on the MachineSet. // Note: The passed in newMS is nil if the new MachineSet doesn't exist in the apiserver yet. -func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) { +func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) { // Copy annotations from Deployment annotations while filtering out some annotations // that we don't want to propagate. annotations := map[string]string{} @@ -199,7 +202,7 @@ func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.Machine // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number // of all old MSes + 1). However, it's possible that some old MSes are deleted after the newMS revision being updated, and // newRevision becomes smaller than newMS's revision. 
We will never decrease a revision of a MachineSet. - maxOldRevision := MaxRevision(oldMSs, log) + maxOldRevision := MaxRevision(ctx, oldMSs) newRevisionInt := maxOldRevision + 1 newRevision := strconv.FormatInt(newRevisionInt, 10) if newMS != nil { diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index 3062c1626461..d4ba8c0b06b0 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -30,12 +30,16 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +var ( + ctx = ctrl.SetupSignalHandler() +) + func newDControllerRef(md *clusterv1.MachineDeployment) *metav1.OwnerReference { isController := true return &metav1.OwnerReference{ @@ -939,11 +943,10 @@ func TestComputeMachineSetAnnotations(t *testing.T) { }, } - log := klogr.New() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := ComputeMachineSetAnnotations(log, tt.deployment, tt.oldMSs, tt.ms) + got, err := ComputeMachineSetAnnotations(ctx, tt.deployment, tt.oldMSs, tt.ms) if tt.wantErr { g.Expect(err).ShouldNot(HaveOccurred()) } else { diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go index 8f27660c6a16..a4ab7382614e 100644 --- a/internal/test/envtest/environment.go +++ b/internal/test/envtest/environment.go @@ -42,7 +42,6 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" - "k8s.io/klog/v2/klogr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -72,11 +71,10 @@ import ( ) func init() { - klog.InitFlags(nil) - logger := klogr.New() + logger := klog.Background() // Use klog as the internal logger for this envtest environment. log.SetLogger(logger) - // Additionally force all of the controllers to use the Ginkgo logger. + // Additionally force all controllers to use the Ginkgo logger. ctrl.SetLogger(logger) // Add logger for ginkgo. klog.SetOutput(ginkgo.GinkgoWriter) diff --git a/main.go b/main.go index 52a661f4f54d..c4497f18b5e8 100644 --- a/main.go +++ b/main.go @@ -231,7 +231,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/test/extension/main.go b/test/extension/main.go index 10ca719cd5fc..0246efc601f0 100644 --- a/test/extension/main.go +++ b/test/extension/main.go @@ -110,7 +110,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go index 8a4c8e27932e..81a4670f77f0 100644 --- a/test/infrastructure/docker/main.go +++ b/test/infrastructure/docker/main.go @@ -172,7 +172,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. 
if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/test/infrastructure/inmemory/main.go b/test/infrastructure/inmemory/main.go index 2c434dc75a4b..ba1898cb3968 100644 --- a/test/infrastructure/inmemory/main.go +++ b/test/infrastructure/inmemory/main.go @@ -166,7 +166,7 @@ func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // Set log level 2 as default. if err := pflag.CommandLine.Set("v", "2"); err != nil { - setupLog.Error(err, "failed to set log level: %v") + setupLog.Error(err, "failed to set default log level") os.Exit(1) } pflag.Parse() diff --git a/util/failuredomains/failure_domains.go b/util/failuredomains/failure_domains.go index 6ef83615a0c3..50b3d87a604c 100644 --- a/util/failuredomains/failure_domains.go +++ b/util/failuredomains/failure_domains.go @@ -18,10 +18,12 @@ limitations under the License. package failuredomains import ( + "context" + "fmt" "sort" - "k8s.io/klog/v2/klogr" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" @@ -50,9 +52,9 @@ func (f failureDomainAggregations) Swap(i, j int) { } // PickMost returns a failure domain that is in machines and has most of the group of machines on. -func PickMost(failureDomains clusterv1.FailureDomains, groupMachines, machines collections.Machines) *string { +func PickMost(ctx context.Context, failureDomains clusterv1.FailureDomains, groupMachines, machines collections.Machines) *string { // orderDescending sorts failure domains according to all machines belonging to the group. - fds := orderDescending(failureDomains, groupMachines) + fds := orderDescending(ctx, failureDomains, groupMachines) for _, fd := range fds { for _, m := range machines { if m.Spec.FailureDomain == nil { @@ -67,8 +69,8 @@ func PickMost(failureDomains clusterv1.FailureDomains, groupMachines, machines c } // orderDescending returns the sorted failure domains in decreasing order. -func orderDescending(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { - aggregations := pick(failureDomains, machines) +func orderDescending(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { + aggregations := pick(ctx, failureDomains, machines) if len(aggregations) == 0 { return nil } @@ -77,8 +79,8 @@ func orderDescending(failureDomains clusterv1.FailureDomains, machines collectio } // PickFewest returns the failure domain with the fewest number of machines. 
-func PickFewest(failureDomains clusterv1.FailureDomains, machines collections.Machines) *string { - aggregations := pick(failureDomains, machines) +func PickFewest(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) *string { + aggregations := pick(ctx, failureDomains, machines) if len(aggregations) == 0 { return nil } @@ -86,7 +88,9 @@ func PickFewest(failureDomains clusterv1.FailureDomains, machines collections.Ma return ptr.To(aggregations[0].id) } -func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { +func pick(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { + log := ctrl.LoggerFrom(ctx) + if len(failureDomains) == 0 { return failureDomainAggregations{} } @@ -105,7 +109,11 @@ func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines } id := *m.Spec.FailureDomain if _, ok := failureDomains[id]; !ok { - klogr.New().Info("unknown failure domain", "machine-name", m.GetName(), "failure-domain-id", id, "known-failure-domains", failureDomains) + var knownFailureDomains []string + for failureDomainID := range failureDomains { + knownFailureDomains = append(knownFailureDomains, failureDomainID) + } + log.Info(fmt.Sprintf("Unknown failure domain %q for Machine %s (known failure domains: %v)", id, m.GetName(), knownFailureDomains)) continue } counters[id]++ diff --git a/util/failuredomains/failure_domains_test.go b/util/failuredomains/failure_domains_test.go index 12a4eb7cdc3b..e49c850087bf 100644 --- a/util/failuredomains/failure_domains_test.go +++ b/util/failuredomains/failure_domains_test.go @@ -21,11 +21,16 @@ import ( . "github.com/onsi/gomega" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" ) +var ( + ctx = ctrl.SetupSignalHandler() +) + func TestNewFailureDomainPicker(t *testing.T) { a := ptr.To("us-west-1a") b := ptr.To("us-west-1b") @@ -87,7 +92,7 @@ func TestNewFailureDomainPicker(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - fd := PickFewest(tc.fds, tc.machines) + fd := PickFewest(ctx, tc.fds, tc.machines) if tc.expected == nil { g.Expect(fd).To(BeNil()) } else { @@ -167,7 +172,7 @@ func TestNewFailureDomainPickMost(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - fd := PickMost(tc.fds, tc.machines, tc.machines) + fd := PickMost(ctx, tc.fds, tc.machines, tc.machines) if tc.expected == nil { g.Expect(fd).To(BeNil()) } else { diff --git a/util/patch/patch.go b/util/patch/patch.go index 388132273449..dc807abe7804 100644 --- a/util/patch/patch.go +++ b/util/patch/patch.go @@ -65,7 +65,7 @@ func NewHelper(obj client.Object, crClient client.Client) (*Helper, error) { } // Convert the object to unstructured to compare against our before copy. - unstructuredObj, err := toUnstructured(obj) + unstructuredObj, err := toUnstructured(obj, gvk) if err != nil { return nil, errors.Wrapf(err, "failed to create patch helper for %s %s: failed to convert object to Unstructured", gvk.Kind, klog.KObj(obj)) } @@ -105,7 +105,7 @@ func (h *Helper) Patch(ctx context.Context, obj client.Object, opts ...Option) e } // Convert the object to unstructured to compare against our before copy. 
- h.after, err = toUnstructured(obj) + h.after, err = toUnstructured(obj, gvk) if err != nil { return errors.Wrapf(err, "failed to patch %s %s: failed to convert object to Unstructured", h.gvk.Kind, klog.KObj(h.beforeObject)) } diff --git a/util/patch/utils.go b/util/patch/utils.go index 92b362672444..ef047f8d36a7 100644 --- a/util/patch/utils.go +++ b/util/patch/utils.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) type patchType string @@ -47,7 +48,9 @@ func unstructuredHasStatus(u *unstructured.Unstructured) bool { return ok } -func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { +// toUnstructured converts an object to Unstructured. +// We have to pass in a gvk as we can't rely on GVK being set in a runtime.Object. +func toUnstructured(obj runtime.Object, gvk schema.GroupVersionKind) (*unstructured.Unstructured, error) { // If the incoming object is already unstructured, perform a deep copy first // otherwise DefaultUnstructuredConverter ends up returning the inner map without // making a copy. @@ -58,7 +61,10 @@ func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { if err != nil { return nil, err } - return &unstructured.Unstructured{Object: rawMap}, nil + u := &unstructured.Unstructured{Object: rawMap} + u.SetGroupVersionKind(gvk) + + return u, nil } // unsafeUnstructuredCopy returns a shallow copy of the unstructured object given as input. diff --git a/util/patch/utils_test.go b/util/patch/utils_test.go index 75cf6b8960c2..df43575d6f15 100644 --- a/util/patch/utils_test.go +++ b/util/patch/utils_test.go @@ -22,6 +22,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -39,10 +40,17 @@ func TestToUnstructured(t *testing.T) { Paused: true, }, } - newObj, err := toUnstructured(obj) + gvk := schema.GroupVersionKind{ + Group: clusterv1.GroupVersion.Group, + Kind: "Cluster", + Version: clusterv1.GroupVersion.Version, + } + newObj, err := toUnstructured(obj, gvk) g.Expect(err).ToNot(HaveOccurred()) g.Expect(newObj.GetName()).To(Equal(obj.Name)) g.Expect(newObj.GetNamespace()).To(Equal(obj.Namespace)) + g.Expect(newObj.GetAPIVersion()).To(Equal(clusterv1.GroupVersion.String())) + g.Expect(newObj.GetKind()).To(Equal("Cluster")) // Change a spec field and validate that it stays the same in the incoming object. g.Expect(unstructured.SetNestedField(newObj.Object, false, "spec", "paused")).To(Succeed()) @@ -55,6 +63,7 @@ func TestToUnstructured(t *testing.T) { obj := &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "test.x.y.z/v1", + "kind": "TestKind", "metadata": map[string]interface{}{ "name": "test-1", "namespace": "namespace-1", @@ -64,11 +73,18 @@ func TestToUnstructured(t *testing.T) { }, }, } + gvk := schema.GroupVersionKind{ + Group: "test.x.y.z", + Kind: "TestKind", + Version: "v1", + } - newObj, err := toUnstructured(obj) + newObj, err := toUnstructured(obj, gvk) g.Expect(err).ToNot(HaveOccurred()) g.Expect(newObj.GetName()).To(Equal(obj.GetName())) g.Expect(newObj.GetNamespace()).To(Equal(obj.GetNamespace())) + g.Expect(newObj.GetAPIVersion()).To(Equal("test.x.y.z/v1")) + g.Expect(newObj.GetKind()).To(Equal("TestKind")) // Validate that the maps point to different addresses. 
g.Expect(obj.Object).ToNot(BeIdenticalTo(newObj.Object))