fix linter findings & tests
sbueringer committed Jan 9, 2024
1 parent aa29d37 commit 6db5131
Showing 29 changed files with 123 additions and 107 deletions.
2 changes: 1 addition & 1 deletion bootstrap/kubeadm/main.go
@@ -168,7 +168,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
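
Note on the hunk above: setupLog is a logr/klog-style logger, so the message is a constant string and the error is passed as a separate argument; the old "%v" was never expanded, which is what the linter flagged. A standalone sketch of the difference (hypothetical setup, not this file's actual wiring):

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	setupLog := klog.Background().WithName("setup")
	err := errors.New("unknown flag: v")

	// Misleading: logr messages are not printf format strings, so "%v" is logged literally.
	setupLog.Error(err, "failed to set log level: %v")

	// Fixed: keep the message constant; the error is attached as its own field.
	setupLog.Error(err, "failed to set default log level")
}
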
6 changes: 3 additions & 3 deletions bootstrap/util/configowner.go
@@ -191,20 +191,20 @@ func GetTypedOwnerByRef(ctx context.Context, c client.Client, ref *corev1.Object
if err != nil {
return nil, errors.Wrapf(err, "failed to construct object of type %s", ref.GroupVersionKind())
}
metaObj, ok := obj.(client.Object)
clientObj, ok := obj.(client.Object)
if !ok {
return nil, errors.Errorf("expected owner reference to refer to a client.Object, is actually %T", obj)
}
key := types.NamespacedName{
Namespace: ref.Namespace,
Name: ref.Name,
}
err = c.Get(ctx, key, metaObj)
err = c.Get(ctx, key, clientObj)
if err != nil {
return nil, err
}

content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(metaObj)
content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(clientObj)
if err != nil {
return nil, err
}
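
The metaObj -> clientObj rename above is cosmetic, but the surrounding flow is worth spelling out. A simplified, hypothetical sketch of what GetTypedOwnerByRef does around these lines (the parameter types and helper name here are assumptions; the real function takes a *corev1.ObjectReference):

// fetchOwnerAsUnstructured is a simplified, hypothetical variant of the flow in
// GetTypedOwnerByRef: build a typed object for the owner's GroupVersionKind,
// fetch it into the renamed clientObj variable, and convert it to Unstructured.
package bootstraputilsketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func fetchOwnerAsUnstructured(ctx context.Context, c client.Client, ref metav1.OwnerReference, namespace string) (*unstructured.Unstructured, error) {
	gv, err := schema.ParseGroupVersion(ref.APIVersion)
	if err != nil {
		return nil, err
	}
	obj, err := c.Scheme().New(gv.WithKind(ref.Kind))
	if err != nil {
		return nil, err
	}
	clientObj, ok := obj.(client.Object)
	if !ok {
		return nil, fmt.Errorf("expected owner %T to implement client.Object", obj)
	}
	if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: ref.Name}, clientObj); err != nil {
		return nil, err
	}
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(clientObj)
	if err != nil {
		return nil, err
	}
	return &unstructured.Unstructured{Object: content}, nil
}
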
24 changes: 16 additions & 8 deletions bootstrap/util/configowner_test.go
@@ -184,7 +184,8 @@ func TestHasNodeRefs(t *testing.T) {
g := NewWithT(t)
machine := &clusterv1.Machine{
TypeMeta: metav1.TypeMeta{
Kind: "Machine",
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Machine",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-name",
@@ -205,7 +206,8 @@ func TestHasNodeRefs(t *testing.T) {
g := NewWithT(t)
machine := &clusterv1.Machine{
TypeMeta: metav1.TypeMeta{
Kind: "Machine",
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Machine",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-name",
@@ -237,7 +239,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// No replicas specified (default is 1). No nodeRefs either.
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -247,7 +250,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 1 replica but no nodeRefs
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -260,7 +264,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 2 replicas but only 1 nodeRef
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -300,7 +305,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 1 replica (default) and 1 nodeRef
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -319,7 +325,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 2 replicas and nodeRefs
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -346,7 +353,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 0 replicas and 0 nodeRef
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
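
Each fixture above now sets APIVersion next to Kind. The likely reason (consistent with the TypeMeta note added to clusterctl's mover.go later in this commit) is that converting a typed object to Unstructured copies TypeMeta verbatim, so an empty APIVersion leaves the result without a usable GroupVersionKind. A small standalone illustration:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	m := &clusterv1.Machine{
		TypeMeta:   metav1.TypeMeta{Kind: "Machine"}, // APIVersion intentionally left empty
		ObjectMeta: metav1.ObjectMeta{Name: "machine-name"},
	}
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(m)
	if err != nil {
		panic(err)
	}
	u := &unstructured.Unstructured{Object: content}
	fmt.Println(u.GroupVersionKind()) // prints "/, Kind=Machine" – group and version are empty
}
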
3 changes: 3 additions & 0 deletions cmd/clusterctl/client/cluster/mover.go
@@ -1315,6 +1315,9 @@ func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.M
return nil
}

// applyMutators applies mutators to an object.
// Note: TypeMeta must always be set in the object because otherwise after conversion the
// resulting Unstructured would have an empty GVK.
func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) {
if object == nil {
return nil, nil
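
Only applyMutators' new doc comment is part of this hunk. For orientation, here is a hypothetical sketch of a convert-then-mutate helper of this shape, assuming ResourceMutatorFunc mutates an *unstructured.Unstructured in place (the real clusterctl implementation may differ):

package clusterctlsketch

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resourceMutatorFunc is an assumed stand-in for clusterctl's ResourceMutatorFunc.
type resourceMutatorFunc func(u *unstructured.Unstructured) error

func applyMutatorsSketch(object client.Object, mutators ...resourceMutatorFunc) (*unstructured.Unstructured, error) {
	if object == nil {
		return nil, nil
	}
	// TypeMeta must be set on object: the converter copies apiVersion/kind
	// verbatim, so an empty TypeMeta yields an Unstructured with an empty GVK.
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
	if err != nil {
		return nil, err
	}
	u := &unstructured.Unstructured{Object: content}
	for _, mutator := range mutators {
		if mutator == nil {
			continue
		}
		if err := mutator(u); err != nil {
			return nil, err
		}
	}
	return u, nil
}
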
16 changes: 0 additions & 16 deletions cmd/clusterctl/client/upgrade_test.go
@@ -181,10 +181,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{ // both providers should be upgraded
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"),
@@ -209,10 +205,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{ // only one provider should be upgraded
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"),
@@ -237,10 +229,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{ // only one provider should be upgraded
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"),
@@ -265,10 +253,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"),
5 changes: 2 additions & 3 deletions controllers/remote/cluster_cache_healthcheck_test.go
@@ -28,7 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
@@ -76,9 +76,8 @@ func TestClusterCacheHealthCheck(t *testing.T) {
k8sClient = mgr.GetClient()

t.Log("Setting up a ClusterCacheTracker")
log := klogr.New()
cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{
Log: &log,
Log: &ctrl.Log,
Indexes: []Index{NodeProviderIDIndex},
})
g.Expect(err).ToNot(HaveOccurred())
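
The test now reuses controller-runtime's package-level delegating logger instead of constructing its own klogr instance (klogr is deprecated). A minimal sketch of that wiring, using the option names from the hunk above; the manager and tracker construction are elided and assumed:

package remote

import (
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

func exampleTrackerLoggerWiring() {
	// Install one process-wide logger; ctrl.Log delegates to whatever sink
	// SetLogger provides, so taking its address up front is safe.
	ctrl.SetLogger(klog.Background())

	opts := ClusterCacheTrackerOptions{
		Log:     &ctrl.Log,
		Indexes: []Index{NodeProviderIDIndex},
	}
	_ = opts // would be passed to NewClusterCacheTracker(mgr, opts)
}
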
12 changes: 6 additions & 6 deletions controlplane/kubeadm/internal/control_plane.go
@@ -96,8 +96,8 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
}

// MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it.
func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) {
fd := c.FailureDomainWithMostMachines(machines)
func (c *ControlPlane) MachineInFailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) (*clusterv1.Machine, error) {
fd := c.FailureDomainWithMostMachines(ctx, machines)
machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd))
machineToMark := machinesInFailureDomain.Oldest()
if machineToMark == nil {
@@ -116,7 +116,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines

// FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most
// control-plane machines on it.
func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string {
func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) *string {
// See if there are any Machines that are not in currently defined failure domains first.
notInFailureDomains := machines.Filter(
collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)),
@@ -127,15 +127,15 @@ func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machin
// in the cluster status.
return notInFailureDomains.Oldest().Spec.FailureDomain
}
return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines)
return failuredomains.PickMost(ctx, c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines)
}

// NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines.
func (c *ControlPlane) NextFailureDomainForScaleUp() *string {
func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) *string {
if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 {
return nil
}
return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines())
return failuredomains.PickFewest(ctx, c.FailureDomains().FilterControlPlane(), c.UpToDateMachines())
}

// InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane.
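
The context.Context parameter threaded through these helpers is presumably there so failuredomains.PickMost/PickFewest can log through the request-scoped logger rather than a package-level one. A hypothetical stand-in showing that pattern (not the failuredomains package's real code):

package failuredomainsketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
)

// pickFewestSketch picks the key with the fewest machines and logs the choice
// via the logger carried on ctx (set by the reconciler), which is the point of
// the new ctx parameter.
func pickFewestSketch(ctx context.Context, machinesPerFailureDomain map[string]int) *string {
	log := ctrl.LoggerFrom(ctx)

	var picked *string
	fewest := -1
	for fd, n := range machinesPerFailureDomain {
		fd := fd
		if fewest == -1 || n < fewest {
			picked, fewest = &fd, n
		}
	}
	if picked != nil {
		log.V(4).Info("Picked failure domain with fewest machines", "failureDomain", *picked, "machines", fewest)
	}
	return picked
}
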
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/control_plane_test.go
@@ -53,12 +53,12 @@ func TestControlPlane(t *testing.T) {
}

t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) {
g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("two"))
g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two"))
})

t.Run("With some machines in non defined failure domains", func(t *testing.T) {
controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown")))
g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("unknown"))
g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown"))
})
})
}
6 changes: 0 additions & 6 deletions controlplane/kubeadm/internal/controllers/controller_test.go
@@ -37,13 +37,11 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
@@ -1906,7 +1904,6 @@ kubernetesVersion: metav1.16.1`,
kubeadmCM.DeepCopy(),
}
fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := &fakeWorkloadCluster{
Workload: &internal.Workload{
@@ -1964,7 +1961,6 @@ kubernetesVersion: metav1.16.1`,
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := fakeWorkloadCluster{
Workload: &internal.Workload{
@@ -2010,7 +2006,6 @@ kubernetesVersion: metav1.16.1`,
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := fakeWorkloadCluster{
Workload: &internal.Workload{
@@ -2071,7 +2066,6 @@ kubernetesVersion: metav1.16.1`,
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := fakeWorkloadCluster{
Workload: &internal.Workload{
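
The per-test log.SetLogger(klogr.New()) calls are gone; a single package-level setup is enough, and klogr itself is deprecated. One plausible replacement (assumed here, not verified against the repository) is configuring the logger once in TestMain:

package controllers

import (
	"os"
	"testing"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

// TestMain installs one logger for the whole test package, replacing the
// repeated klogr-based SetLogger calls that this commit removes.
func TestMain(m *testing.M) {
	ctrl.SetLogger(klog.Background())
	os.Exit(m.Run())
}
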
10 changes: 5 additions & 5 deletions controlplane/kubeadm/internal/controllers/scale.go
@@ -39,7 +39,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
logger := ctrl.LoggerFrom(ctx)

bootstrapSpec := controlPlane.InitialControlPlaneConfig()
fd := controlPlane.NextFailureDomainForScaleUp()
fd := controlPlane.NextFailureDomainForScaleUp(ctx)
if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil {
logger.Error(err, "Failed to create initial control plane Machine")
r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err)
@@ -60,7 +60,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,

// Create the bootstrap configuration
bootstrapSpec := controlPlane.JoinControlPlaneConfig()
fd := controlPlane.NextFailureDomainForScaleUp()
fd := controlPlane.NextFailureDomainForScaleUp(ctx)
if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil {
logger.Error(err, "Failed to create additional control plane Machine")
r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err)
@@ -79,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
logger := ctrl.LoggerFrom(ctx)

// Pick the Machine that we should scale down.
machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines)
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down")
}
@@ -223,7 +223,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust
return nil
}

func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
machines := controlPlane.Machines
switch {
case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0:
@@ -233,5 +233,5 @@ func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMach
case outdatedMachines.Len() > 0:
machines = outdatedMachines
}
return controlPlane.MachineInFailureDomainWithMostMachines(machines)
return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines)
}
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/scale_test.go
@@ -464,7 +464,7 @@ func TestSelectMachineForScaleDown(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)

selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines)
selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines)

if tc.expectErr {
g.Expect(err).To(HaveOccurred())
7 changes: 0 additions & 7 deletions controlplane/kubeadm/internal/controllers/status_test.go
@@ -24,10 +24,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -72,7 +70,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())

fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -145,7 +142,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -219,7 +215,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -302,7 +297,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing
objs = append(objs, n, m, kubeadmConfigMap())
machines[m.Name] = m
fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -383,7 +377,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

// Set all the machines to `not ready`
r := &KubeadmControlPlaneReconciler{
2 changes: 1 addition & 1 deletion controlplane/kubeadm/main.go
@@ -171,7 +171,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()