Commit: Replace deprecated klogr.New()

Signed-off-by: Stefan Büringer buringerst@vmware.com

sbueringer committed Jan 16, 2024
1 parent 3cf12d7 commit 4c5fa85
Showing 15 changed files with 66 additions and 67 deletions.
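klog's klogr package is deprecated, so this commit swaps every direct klogr.New() call for controller-runtime's logging entry points: ctrl.Log where a logr.Logger value is still needed, and ctrl.LoggerFrom(ctx) inside helpers, which in turn means threading context.Context through call chains that previously passed a logr.Logger around. A minimal sketch of the pattern — illustrative only; the textlogger choice and the function names here are assumptions, not code from this commit:

```go
package main

import (
	"context"

	"k8s.io/klog/v2/textlogger"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// One root logger per process; klog's textlogger is the documented
	// replacement for the deprecated klogr constructor.
	ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig()))

	// Attach a named logger to a context and hand the context around.
	ctx := ctrl.LoggerInto(context.Background(), ctrl.Log.WithName("setup"))
	run(ctx)
}

// run shows the callee side: recover the logger from the context instead
// of accepting a logr.Logger parameter.
func run(ctx context.Context) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("starting")
}
```

The same substitution explains every hunk below: deleted klogr imports and log.SetLogger(klogr.New()) calls, plus signatures gaining a ctx context.Context parameter.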
5 changes: 2 additions & 3 deletions controllers/remote/cluster_cache_healthcheck_test.go
@@ -28,7 +28,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/rest"
-    "k8s.io/klog/v2/klogr"
+    ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/manager"
     metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
@@ -76,9 +76,8 @@ func TestClusterCacheHealthCheck(t *testing.T) {
         k8sClient = mgr.GetClient()

         t.Log("Setting up a ClusterCacheTracker")
-        log := klogr.New()
         cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{
-            Log:     &log,
+            Log:     &ctrl.Log,
             Indexes: []Index{NodeProviderIDIndex},
         })
         g.Expect(err).ToNot(HaveOccurred())
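Passing &ctrl.Log instead of a locally built klogr logger works because ctrl.Log is controller-runtime's delegating root logger: it forwards to whatever backend ctrl.SetLogger installs. A hedged sketch of the suite-level setup this relies on (the file and the textlogger backend are assumptions, not part of this diff):

```go
package remote

import (
	"os"
	"testing"

	"k8s.io/klog/v2/textlogger"
	ctrl "sigs.k8s.io/controller-runtime"
)

// TestMain installs the single root logger; anything holding &ctrl.Log,
// like the ClusterCacheTracker above, transparently logs through it.
func TestMain(m *testing.M) {
	ctrl.SetLogger(textlogger.NewLogger(textlogger.NewConfig()))
	os.Exit(m.Run())
}
```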
12 changes: 6 additions & 6 deletions controlplane/kubeadm/internal/control_plane.go
@@ -96,8 +96,8 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
 }

 // MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it.
-func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) {
-    fd := c.FailureDomainWithMostMachines(machines)
+func (c *ControlPlane) MachineInFailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) (*clusterv1.Machine, error) {
+    fd := c.FailureDomainWithMostMachines(ctx, machines)
     machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd))
     machineToMark := machinesInFailureDomain.Oldest()
     if machineToMark == nil {
@@ -116,7 +116,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines

 // FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most
 // control-plane machines on it.
-func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string {
+func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) *string {
     // See if there are any Machines that are not in currently defined failure domains first.
     notInFailureDomains := machines.Filter(
         collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)),
@@ -127,15 +127,15 @@ func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machin
         // in the cluster status.
         return notInFailureDomains.Oldest().Spec.FailureDomain
     }
-    return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines)
+    return failuredomains.PickMost(ctx, c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines)
 }

 // NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines.
-func (c *ControlPlane) NextFailureDomainForScaleUp() *string {
+func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) *string {
     if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 {
         return nil
     }
-    return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines())
+    return failuredomains.PickFewest(ctx, c.FailureDomains().FilterControlPlane(), c.UpToDateMachines())
 }

 // InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane.
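The new ctx parameters exist so the failuredomains pickers can log: callees derive a reconcile-scoped logger with ctrl.LoggerFrom(ctx) instead of accepting a logr.Logger argument. A sketch of the callee side under that assumption (pickExample and its signature are hypothetical, not the real failuredomains API):

```go
package failuredomains

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
)

// pickExample mimics a PickFewest-style helper: it needs ctx only to
// obtain the reconcile-scoped logger.
func pickExample(ctx context.Context, machinesPerFD map[string]int) *string {
	log := ctrl.LoggerFrom(ctx)

	var best *string
	fewest := -1
	for fd, n := range machinesPerFD {
		fd := fd // copy: &fd must not alias the loop variable
		if fewest == -1 || n < fewest {
			fewest, best = n, &fd
		}
	}
	if best == nil {
		log.Info("No failure domain available to pick from")
	}
	return best
}
```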
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/control_plane_test.go
@@ -53,12 +53,12 @@ func TestControlPlane(t *testing.T) {
     }

     t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) {
-        g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("two"))
+        g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two"))
     })

     t.Run("With some machines in non defined failure domains", func(t *testing.T) {
         controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown")))
-        g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("unknown"))
+        g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown"))
     })
 })
}
6 changes: 0 additions & 6 deletions controlplane/kubeadm/internal/controllers/controller_test.go
@@ -37,13 +37,11 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/tools/record"
-    "k8s.io/klog/v2/klogr"
     "k8s.io/utils/ptr"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
     "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-    "sigs.k8s.io/controller-runtime/pkg/log"

     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
     bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
@@ -1906,7 +1904,6 @@ kubernetesVersion: metav1.16.1`,
         kubeadmCM.DeepCopy(),
     }
     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     workloadCluster := &fakeWorkloadCluster{
         Workload: &internal.Workload{
@@ -1964,7 +1961,6 @@ kubernetesVersion: metav1.16.1`,
     }

     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     workloadCluster := fakeWorkloadCluster{
         Workload: &internal.Workload{
@@ -2010,7 +2006,6 @@ kubernetesVersion: metav1.16.1`,
     }

     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     workloadCluster := fakeWorkloadCluster{
         Workload: &internal.Workload{
@@ -2071,7 +2066,6 @@ kubernetesVersion: metav1.16.1`,
     }

     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     workloadCluster := fakeWorkloadCluster{
         Workload: &internal.Workload{
10 changes: 5 additions & 5 deletions controlplane/kubeadm/internal/controllers/scale.go
@@ -39,7 +39,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
     logger := ctrl.LoggerFrom(ctx)

     bootstrapSpec := controlPlane.InitialControlPlaneConfig()
-    fd := controlPlane.NextFailureDomainForScaleUp()
+    fd := controlPlane.NextFailureDomainForScaleUp(ctx)
     if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil {
         logger.Error(err, "Failed to create initial control plane Machine")
         r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err)
@@ -60,7 +60,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,

     // Create the bootstrap configuration
     bootstrapSpec := controlPlane.JoinControlPlaneConfig()
-    fd := controlPlane.NextFailureDomainForScaleUp()
+    fd := controlPlane.NextFailureDomainForScaleUp(ctx)
     if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil {
         logger.Error(err, "Failed to create additional control plane Machine")
         r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err)
@@ -79,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
     logger := ctrl.LoggerFrom(ctx)

     // Pick the Machine that we should scale down.
-    machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
+    machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines)
     if err != nil {
         return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down")
     }
@@ -223,7 +223,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust
     return nil
 }

-func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
+func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
     machines := controlPlane.Machines
     switch {
     case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0:
@@ -233,5 +233,5 @@ func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMach
     case outdatedMachines.Len() > 0:
         machines = outdatedMachines
     }
-    return controlPlane.MachineInFailureDomainWithMostMachines(machines)
+    return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines)
 }
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/scale_test.go
@@ -464,7 +464,7 @@ func TestSelectMachineForScaleDown(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             g := NewWithT(t)

-            selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines)
+            selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines)

             if tc.expectErr {
                 g.Expect(err).To(HaveOccurred())
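The updated call passes a bare ctx, presumably a package-level context wired up in the test suite's setup rather than a per-test value; a hypothetical minimal version of that convention:

```go
package controllers

import (
	"context"
	"os"
	"testing"
)

// Package-level ctx shared by table-driven tests such as
// TestSelectMachineForScaleDown (assumed convention, not verbatim).
var ctx = context.Background()

func TestMain(m *testing.M) {
	// envtest/logger setup for the suite would happen here.
	os.Exit(m.Run())
}
```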
7 changes: 0 additions & 7 deletions controlplane/kubeadm/internal/controllers/status_test.go
@@ -24,10 +24,8 @@ import (
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/tools/record"
-    "k8s.io/klog/v2/klogr"
     "k8s.io/utils/ptr"
     "sigs.k8s.io/controller-runtime/pkg/client"
-    "sigs.k8s.io/controller-runtime/pkg/log"

     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
     controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -72,7 +70,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) {
     g.Expect(err).ToNot(HaveOccurred())

     fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
-    log.SetLogger(klogr.New())

     r := &KubeadmControlPlaneReconciler{
         Client: fakeClient,
@@ -145,7 +142,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin
     }

     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     r := &KubeadmControlPlaneReconciler{
         Client: fakeClient,
@@ -219,7 +215,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T
     }

     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     r := &KubeadmControlPlaneReconciler{
         Client: fakeClient,
@@ -302,7 +297,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing
     objs = append(objs, n, m, kubeadmConfigMap())
     machines[m.Name] = m
     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     r := &KubeadmControlPlaneReconciler{
         Client: fakeClient,
@@ -383,7 +377,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr
     }

     fakeClient := newFakeClient(objs...)
-    log.SetLogger(klogr.New())

     // Set all the machines to `not ready`
     r := &KubeadmControlPlaneReconciler{
2 changes: 1 addition & 1 deletion hack/tools/runtime-openapi-gen/main.go
@@ -74,6 +74,6 @@ func main() {

     err = os.WriteFile(*outputFile, openAPIBytes, 0600)
     if err != nil {
-        klog.Exitf("Failed to write OpenAPI specification to file %q: %v", outputFile, err)
+        klog.Exitf("Failed to write OpenAPI specification to file %q: %v", *outputFile, err)
     }
 }
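This hunk is a straight bug fix riding along with the logging change: outputFile comes from flag.String and is therefore a *string, so handing it to the %q verb prints a pointer representation instead of the path. A self-contained repro (flag name and default are illustrative):

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	outputFile := flag.String("output-file", "openapi.yaml", "output file path")
	flag.Parse()

	fmt.Printf("%q\n", outputFile)  // e.g. %!q(*string=0xc0000140a0) — the bug
	fmt.Printf("%q\n", *outputFile) // "openapi.yaml" — the fix
}
```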
@@ -22,7 +22,6 @@ import (
     "sort"
     "time"

-    "github.com/go-logr/logr"
     "github.com/pkg/errors"
     corev1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -150,7 +149,7 @@ func (r *Reconciler) updateMachineSet(ctx context.Context, deployment *clusterv1
     log := ctrl.LoggerFrom(ctx)

     // Compute the desired MachineSet.
-    updatedMS, err := r.computeDesiredMachineSet(deployment, ms, oldMSs, log)
+    updatedMS, err := r.computeDesiredMachineSet(ctx, deployment, ms, oldMSs)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to update MachineSet %q", klog.KObj(ms))
     }
@@ -172,7 +171,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl
     log := ctrl.LoggerFrom(ctx)

     // Compute the desired MachineSet.
-    newMS, err := r.computeDesiredMachineSet(deployment, nil, oldMSs, log)
+    newMS, err := r.computeDesiredMachineSet(ctx, deployment, nil, oldMSs)
     if err != nil {
         return nil, errors.Wrap(err, "failed to create new MachineSet")
     }
@@ -213,7 +212,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl
 // There are small differences in how we calculate the MachineSet depending on if it
 // is a create or update. Example: for a new MachineSet we have to calculate a new name,
 // while for an existing MachineSet we have to use the name of the existing MachineSet.
-func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, log logr.Logger) (*clusterv1.MachineSet, error) {
+func (r *Reconciler) computeDesiredMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) {
     var name string
     var uid types.UID
     var finalizers []string
@@ -328,7 +327,7 @@ func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeplo
     desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue)

     // Set annotations and .spec.template.annotations.
-    if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(log, deployment, oldMSs, existingMS); err != nil {
+    if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(ctx, deployment, oldMSs, existingMS); err != nil {
         return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute annotations")
     }
     desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations)
@@ -30,7 +30,6 @@ import (
     "k8s.io/apimachinery/pkg/types"
     apirand "k8s.io/apimachinery/pkg/util/rand"
     "k8s.io/client-go/tools/record"
-    "k8s.io/klog/v2/klogr"
     "k8s.io/utils/ptr"
     "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -555,8 +554,6 @@ func TestComputeDesiredMachineSet(t *testing.T) {
         },
     }

-    log := klogr.New()
-
     skeletonMSBasedOnMD := &clusterv1.MachineSet{
         ObjectMeta: metav1.ObjectMeta{
             Namespace: "default",
@@ -577,7 +574,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
         expectedMS := skeletonMSBasedOnMD.DeepCopy()

         g := NewWithT(t)
-        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, nil, log)
+        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, nil)
         g.Expect(err).ToNot(HaveOccurred())
         assertMachineSet(g, actualMS, expectedMS)
     })
@@ -590,7 +587,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
         expectedMS.Spec.Replicas = ptr.To[int32](2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2

         g := NewWithT(t)
-        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, []*clusterv1.MachineSet{oldMS}, log)
+        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, []*clusterv1.MachineSet{oldMS})
         g.Expect(err).ToNot(HaveOccurred())
         assertMachineSet(g, actualMS, expectedMS)
     })
@@ -627,7 +624,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
         expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID

         g := NewWithT(t)
-        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log)
+        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil)
         g.Expect(err).ToNot(HaveOccurred())
         assertMachineSet(g, actualMS, expectedMS)
     })
@@ -668,7 +665,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
         expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID

         g := NewWithT(t)
-        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, []*clusterv1.MachineSet{oldMS}, log)
+        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, []*clusterv1.MachineSet{oldMS})
         g.Expect(err).ToNot(HaveOccurred())
         assertMachineSet(g, actualMS, expectedMS)
     })
@@ -714,7 +711,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
         expectedMS.Spec.DeletePolicy = ""

         g := NewWithT(t)
-        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log)
+        actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil)
         g.Expect(err).ToNot(HaveOccurred())
         assertMachineSet(g, actualMS, expectedMS)
     })
13 changes: 8 additions & 5 deletions internal/controllers/machinedeployment/mdutil/util.go
@@ -18,6 +18,7 @@ limitations under the License.
 package mdutil

 import (
+    "context"
     "fmt"
     "sort"
     "strconv"
@@ -33,6 +34,7 @@ import (
     intstrutil "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/klog/v2"
     "k8s.io/utils/integer"
+    ctrl "sigs.k8s.io/controller-runtime"

     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
     "sigs.k8s.io/cluster-api/util/conversion"
@@ -114,13 +116,14 @@ func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision str
 }

 // MaxRevision finds the highest revision in the machine sets.
-func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 {
+func MaxRevision(ctx context.Context, allMSs []*clusterv1.MachineSet) int64 {
+    log := ctrl.LoggerFrom(ctx)
+
     max := int64(0)
     for _, ms := range allMSs {
         if v, err := Revision(ms); err != nil {
             // Skip the machine sets when it failed to parse their revision information
-            logger.Error(err, "Couldn't parse revision for machine set, deployment controller will skip it when reconciling revisions",
-                "machineset", ms.Name)
+            log.Error(err, fmt.Sprintf("Couldn't parse revision for MachineSet %s, deployment controller will skip it when reconciling revisions", ms.Name))
         } else if v > max {
             max = v
         }
@@ -185,7 +188,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger

 // ComputeMachineSetAnnotations computes the annotations that should be set on the MachineSet.
 // Note: The passed in newMS is nil if the new MachineSet doesn't exist in the apiserver yet.
-func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) {
+func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) {
     // Copy annotations from Deployment annotations while filtering out some annotations
     // that we don't want to propagate.
     annotations := map[string]string{}
@@ -199,7 +202,7 @@ func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.Machine
     // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number
     // of all old MSes + 1). However, it's possible that some old MSes are deleted after the newMS revision being updated, and
     // newRevision becomes smaller than newMS's revision. We will never decrease a revision of a MachineSet.
-    maxOldRevision := MaxRevision(oldMSs, log)
+    maxOldRevision := MaxRevision(ctx, oldMSs)
     newRevisionInt := maxOldRevision + 1
     newRevision := strconv.FormatInt(newRevisionInt, 10)
     if newMS != nil {
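MaxRevision now pulls its logger from the context; ctrl.LoggerFrom returns a usable logger even when none was injected, so call sites need no nil checks. For orientation, a hedged sketch of the revision bookkeeping MaxRevision performs — the annotation key and helper shape are assumptions, not the package's actual code:

```go
package mdutil

import (
	"context"
	"strconv"

	ctrl "sigs.k8s.io/controller-runtime"
)

const revisionAnnotation = "machinedeployment.clusters.x-k8s.io/revision" // assumed key

func maxRevisionSketch(ctx context.Context, annotations []map[string]string) int64 {
	log := ctrl.LoggerFrom(ctx)

	max := int64(0)
	for _, a := range annotations {
		v, err := strconv.ParseInt(a[revisionAnnotation], 10, 64)
		if err != nil {
			// Unparsable revisions are logged and skipped, never fatal.
			log.Error(err, "Couldn't parse revision, skipping")
			continue
		}
		if v > max {
			max = v
		}
	}
	return max
}
```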