diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml
index 0c8915b863c1..366faa3632e2 100644
--- a/.github/workflows/pr-golangci-lint.yaml
+++ b/.github/workflows/pr-golangci-lint.yaml
@@ -30,6 +30,6 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # tag=v3.7.0
         with:
-          version: v1.55.2
+          version: v1.56.1
           args: --out-format=colored-line-number
           working-directory: ${{matrix.working-directory}}
diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go b/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go
index 653b4da552e8..67b4f51cd764 100644
--- a/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go
+++ b/bootstrap/kubeadm/api/v1beta1/kubeadm_types.go
@@ -569,7 +569,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error {
 
 // String returns the string representation of the BootstrapTokenString.
 func (bts BootstrapTokenString) String() string {
-	if len(bts.ID) > 0 && len(bts.Secret) > 0 {
+	if bts.ID != "" && bts.Secret != "" {
 		return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret)
 	}
 	return ""
diff --git a/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go b/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go
index 98aacc8852ea..e8d1e562d84c 100644
--- a/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go
+++ b/bootstrap/kubeadm/api/v1beta1/kubeadm_types_test.go
@@ -156,7 +156,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error {
 	var err error
 	newbts := &BootstrapTokenString{}
 	// If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string
-	if len(input) > 0 {
+	if input != "" {
 		if err := json.Unmarshal([]byte(input), newbts); err != nil {
 			return errors.Wrap(err, "expected no unmarshal error, got error")
 		}
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
index ea814e14b5b7..cc667181760f 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
@@ -179,7 +179,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi
 	key := client.ObjectKeyFromObject(config)
 	actual := &corev1.Secret{}
 
-	t.Run("KubeadmConfig ownerReference is added on first reconcile", func(t *testing.T) {
+	t.Run("KubeadmConfig ownerReference is added on first reconcile", func(*testing.T) {
 		_, err = k.Reconcile(ctx, request)
 		g.Expect(err).ToNot(HaveOccurred())
 
@@ -191,7 +191,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi
 		g.Expect(controllerOwner.Name).To(Equal(config.Name))
 	})
 
-	t.Run("KubeadmConfig ownerReference re-reconciled without error", func(t *testing.T) {
+	t.Run("KubeadmConfig ownerReference re-reconciled without error", func(*testing.T) {
 		_, err = k.Reconcile(ctx, request)
 		g.Expect(err).ToNot(HaveOccurred())
 
@@ -202,7 +202,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi
 		g.Expect(controllerOwner.Kind).To(Equal(config.Kind))
 		g.Expect(controllerOwner.Name).To(Equal(config.Name))
 	})
-	t.Run("non-KubeadmConfig controller OwnerReference is replaced", func(t *testing.T) {
+	t.Run("non-KubeadmConfig controller OwnerReference is replaced", func(*testing.T) {
 		g.Expect(myclient.Get(ctx, key, actual)).To(Succeed())
 
 		actual.SetOwnerReferences([]metav1.OwnerReference{
@@ -741,7 +741,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) {
 			name:        "Join a worker node with a fully compiled kubeadm config object",
 			machinePool: newWorkerMachinePoolForCluster(cluster),
 			configName:  "workerpool-join-cfg",
-			configBuilder: func(namespace, name string) *bootstrapv1.KubeadmConfig {
+			configBuilder: func(namespace, _ string) *bootstrapv1.KubeadmConfig {
 				return newWorkerJoinKubeadmConfig(namespace, "workerpool-join-cfg")
 			},
 		},
diff --git a/bootstrap/kubeadm/internal/controllers/suite_test.go b/bootstrap/kubeadm/internal/controllers/suite_test.go
index 5b1afdb40b52..dce30d9334f7 100644
--- a/bootstrap/kubeadm/internal/controllers/suite_test.go
+++ b/bootstrap/kubeadm/internal/controllers/suite_test.go
@@ -36,7 +36,7 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) {
+	setupReconcilers := func(_ context.Context, mgr ctrl.Manager) {
 		var err error
 		secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{
 			HTTPClient: mgr.GetHTTPClient(),
diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
index 9cd4a1a664f0..9d2feeff6e3a 100644
--- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
+++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
@@ -176,7 +176,7 @@ func TestControlPlaneInitMutex_LockWithMachineDeletion(t *testing.T) {
 		},
 	}
 	for _, tc := range tests {
-		t.Run(tc.name, func(t *testing.T) {
+		t.Run(tc.name, func(*testing.T) {
 			l := &ControlPlaneInitMutex{
 				client: tc.client,
 			}
diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go
index 0dca2e0b3b0a..2ad57c137bd0 100644
--- a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go
+++ b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring.go
@@ -62,7 +62,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error {
 
 // String returns the string representation of the BootstrapTokenString.
 func (bts BootstrapTokenString) String() string {
-	if len(bts.ID) > 0 && len(bts.Secret) > 0 {
+	if bts.ID != "" && bts.Secret != "" {
 		return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret)
 	}
 	return ""
diff --git a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go
index 154ab8540ef1..c6f33b0430c1 100644
--- a/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go
+++ b/bootstrap/kubeadm/types/upstreamv1beta2/bootstraptokenstring_test.go
@@ -98,7 +98,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error {
 	var err error
 	newbts := &BootstrapTokenString{}
 	// If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string
-	if len(input) > 0 {
+	if input != "" {
 		if err := json.Unmarshal([]byte(input), newbts); err != nil {
 			return errors.Wrap(err, "expected no unmarshal error, got error")
 		}
diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go
index 2bb4fda07254..5b0b0aebc3ff 100644
--- a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go
+++ b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring.go
@@ -60,7 +60,7 @@ func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error {
 
 // String returns the string representation of the BootstrapTokenString.
 func (bts BootstrapTokenString) String() string {
-	if len(bts.ID) > 0 && len(bts.Secret) > 0 {
+	if bts.ID != "" && bts.Secret != "" {
 		return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret)
 	}
 	return ""
diff --git a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go
index 640c581a6dea..202244fa4e7d 100644
--- a/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go
+++ b/bootstrap/kubeadm/types/upstreamv1beta3/bootstraptokenstring_test.go
@@ -105,7 +105,7 @@ func roundtrip(input string, bts *BootstrapTokenString) error {
 	var err error
 	newbts := &BootstrapTokenString{}
 	// If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string
-	if len(input) > 0 {
+	if input != "" {
 		if err := json.Unmarshal([]byte(input), newbts); err != nil {
 			return errors.Wrap(err, "expected no unmarshal error, got error")
 		}
diff --git a/cmd/clusterctl/client/client_test.go b/cmd/clusterctl/client/client_test.go
index 90c93f92110a..36b3d61fd1fe 100644
--- a/cmd/clusterctl/client/client_test.go
+++ b/cmd/clusterctl/client/client_test.go
@@ -176,7 +176,7 @@ func newFakeClient(ctx context.Context, configClient config.Client) *fakeClient
 	fake.internalClient, _ = newClusterctlClient(ctx, "fake-config",
 		InjectConfig(fake.configClient),
 		InjectClusterClientFactory(clusterClientFactory),
-		InjectRepositoryFactory(func(ctx context.Context, input RepositoryClientFactoryInput) (repository.Client, error) {
+		InjectRepositoryFactory(func(_ context.Context, input RepositoryClientFactoryInput) (repository.Client, error) {
 			if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok {
 				return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel())
 			}
@@ -212,14 +212,14 @@ func newFakeCluster(kubeconfig cluster.Kubeconfig, configClient config.Client) *
 	}
 
 	fake.fakeProxy = test.NewFakeProxy()
-	pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error {
+	pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error {
 		return nil
 	}
 
 	fake.internalclient = cluster.New(kubeconfig, configClient,
 		cluster.InjectProxy(fake.fakeProxy),
 		cluster.InjectPollImmediateWaiter(pollImmediateWaiter),
-		cluster.InjectRepositoryFactory(func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+		cluster.InjectRepositoryFactory(func(_ context.Context, provider config.Provider, _ config.Client, _ ...repository.Option) (repository.Client, error) {
 			if _, ok := fake.repositories[provider.Name()]; !ok {
 				return nil, errors.Errorf("repository for kubeconfig %q does not exist", provider.Name())
 			}
diff --git a/cmd/clusterctl/client/cluster/cert_manager_test.go b/cmd/clusterctl/client/cluster/cert_manager_test.go
index 4ed9582fbe3a..7d530f836539 100644
--- a/cmd/clusterctl/client/cluster/cert_manager_test.go
+++ b/cmd/clusterctl/client/cluster/cert_manager_test.go
@@ -129,7 +129,7 @@ func Test_getManifestObjs(t *testing.T) {
 
 			cm := &certManagerClient{
 				configClient: defaultConfigClient,
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository))
 				},
 			}
@@ -168,7 +168,7 @@ func Test_getManifestObjs(t *testing.T) {
 }
 
 func Test_GetTimeout(t *testing.T) {
-	pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error {
+	pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error {
 		return nil
 	}
 
@@ -426,7 +426,7 @@ func Test_shouldUpgrade(t *testing.T) {
 
 			proxy := test.NewFakeProxy()
 			fakeConfigClient := newFakeConfig().WithCertManager("", tt.configVersion, "")
-			pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error {
+			pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error {
 				return nil
 			}
 			cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter)
@@ -715,7 +715,7 @@ func Test_certManagerClient_PlanUpgrade(t *testing.T) {
 
 			proxy := test.NewFakeProxy().WithObjs(tt.objs...)
 			fakeConfigClient := newFakeConfig()
-			pollImmediateWaiter := func(ctx context.Context, interval, timeout time.Duration, condition wait.ConditionWithContextFunc) error {
+			pollImmediateWaiter := func(context.Context, time.Duration, time.Duration, wait.ConditionWithContextFunc) error {
 				return nil
 			}
 			cm := newCertManagerClient(fakeConfigClient, nil, proxy, pollImmediateWaiter)
diff --git a/cmd/clusterctl/client/cluster/installer_test.go b/cmd/clusterctl/client/cluster/installer_test.go
index b5cee3dd8444..f2d61368f797 100644
--- a/cmd/clusterctl/client/cluster/installer_test.go
+++ b/cmd/clusterctl/client/cluster/installer_test.go
@@ -245,7 +245,7 @@ func Test_providerInstaller_Validate(t *testing.T) {
 				configClient:      configClient,
 				proxy:             tt.fields.proxy,
 				providerInventory: newInventoryClient(tt.fields.proxy, nil),
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(repositoryMap[provider.ManifestLabel()]))
 				},
 				installQueue: tt.fields.installQueue,
diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go
index ca18608d60e4..0232aba2b8b7 100644
--- a/cmd/clusterctl/client/cluster/mover.go
+++ b/cmd/clusterctl/client/cluster/mover.go
@@ -967,7 +967,7 @@ func (o *objectMover) createTargetObject(ctx context.Context, nodeToCreate *node
 
 	// FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed.
 	// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
-	if len(obj.GetName()) > 0 && len(obj.GetGenerateName()) > 0 {
+	if obj.GetName() != "" && obj.GetGenerateName() != "" {
 		obj.SetGenerateName("")
 	}
 
diff --git a/cmd/clusterctl/client/cluster/proxy.go b/cmd/clusterctl/client/cluster/proxy.go
index 3a27c459620e..0cc13b589af8 100644
--- a/cmd/clusterctl/client/cluster/proxy.go
+++ b/cmd/clusterctl/client/cluster/proxy.go
@@ -224,7 +224,7 @@ func (k *proxy) ListResources(ctx context.Context, labels map[string]string, nam
 	// Get all the API resources in the cluster.
 	resourceListBackoff := newReadBackoff()
 	var resourceList []*metav1.APIResourceList
-	if err := retryWithExponentialBackoff(ctx, resourceListBackoff, func(ctx context.Context) error {
+	if err := retryWithExponentialBackoff(ctx, resourceListBackoff, func(context.Context) error {
 		resourceList, err = cs.Discovery().ServerPreferredResources()
 		return err
 	}); err != nil {
diff --git a/cmd/clusterctl/client/cluster/template_test.go b/cmd/clusterctl/client/cluster/template_test.go
index a374e553968b..79c6a809711f 100644
--- a/cmd/clusterctl/client/cluster/template_test.go
+++ b/cmd/clusterctl/client/cluster/template_test.go
@@ -168,7 +168,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) {
 	configClient, err := config.New(context.Background(), "", config.InjectReader(test.NewFakeReader()))
 	g.Expect(err).ToNot(HaveOccurred())
 
-	mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, `{
 			"type": "file",
 			"encoding": "base64",
@@ -214,7 +214,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) {
 
 			c := &templateClient{
 				configClient: configClient,
-				gitHubClientFactory: func(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) {
+				gitHubClientFactory: func(context.Context, config.VariablesClient) (*github.Client, error) {
 					return client, nil
 				},
 			}
@@ -232,7 +232,7 @@ func Test_templateClient_getGitHubFileContent(t *testing.T) {
 }
 
 func Test_templateClient_getRawUrlFileContent(t *testing.T) {
-	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, template)
 	}))
 
@@ -343,7 +343,7 @@ func Test_templateClient_GetFromURL(t *testing.T) {
 	fakeGithubClient, mux, teardown := test.NewFakeGitHub()
 	defer teardown()
 
-	mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/repos/kubernetes-sigs/cluster-api/contents/config/default/cluster-template.yaml", func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, `{
 			"type": "file",
 			"encoding": "base64",
@@ -355,7 +355,7 @@ func Test_templateClient_GetFromURL(t *testing.T) {
 		}`)
 	})
 
-	mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v1.0.0", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v1.0.0", func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, `{
 			"tag_name": "v1.0.0",
 			"name": "v1.0.0",
@@ -370,11 +370,11 @@ func Test_templateClient_GetFromURL(t *testing.T) {
 		}`)
 	})
 
-	mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/87654321", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/87654321", func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, template)
 	})
 
-	mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v2.0.0", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/repos/some-owner/some-repo/releases/tags/v2.0.0", func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, `{
 			"tag_name": "v2.0.0",
 			"name": "v2.0.0",
@@ -390,14 +390,14 @@ func Test_templateClient_GetFromURL(t *testing.T) {
 	})
 
 	// redirect asset
-	mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/22222222", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/repos/some-owner/some-repo/releases/assets/22222222", func(w http.ResponseWriter, _ *http.Request) {
 		// add the "/api-v3" prefix to match the prefix of the fake github server
 		w.Header().Add("Location", "/api-v3/redirected/22222222")
 		w.WriteHeader(http.StatusFound)
 	})
 
 	// redirect location
-	mux.HandleFunc("/redirected/22222222", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/redirected/22222222", func(w http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(w, template)
 	})
 
@@ -488,7 +488,7 @@ func Test_templateClient_GetFromURL(t *testing.T) {
 
 	ctx := context.Background()
 
-	gitHubClientFactory := func(ctx context.Context, configVariablesClient config.VariablesClient) (*github.Client, error) {
+	gitHubClientFactory := func(context.Context, config.VariablesClient) (*github.Client, error) {
 		return fakeGithubClient, nil
 	}
 	processor := yaml.NewSimpleProcessor()
diff --git a/cmd/clusterctl/client/cluster/upgrader_info_test.go b/cmd/clusterctl/client/cluster/upgrader_info_test.go
index 52ce9de876e0..ca1bd0fe9576 100644
--- a/cmd/clusterctl/client/cluster/upgrader_info_test.go
+++ b/cmd/clusterctl/client/cluster/upgrader_info_test.go
@@ -233,7 +233,7 @@ func Test_providerUpgrader_getUpgradeInfo(t *testing.T) {
 
 			u := &providerUpgrader{
 				configClient: configClient,
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repo))
 				},
 			}
diff --git a/cmd/clusterctl/client/cluster/upgrader_test.go b/cmd/clusterctl/client/cluster/upgrader_test.go
index a77f8584c63d..322ad4c74d4a 100644
--- a/cmd/clusterctl/client/cluster/upgrader_test.go
+++ b/cmd/clusterctl/client/cluster/upgrader_test.go
@@ -315,7 +315,7 @@ func Test_providerUpgrader_Plan(t *testing.T) {
 
 			u := &providerUpgrader{
 				configClient: configClient,
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
 				},
 				providerInventory: newInventoryClient(tt.fields.proxy, nil),
@@ -786,7 +786,7 @@ func Test_providerUpgrader_createCustomPlan(t *testing.T) {
 
 			u := &providerUpgrader{
 				configClient: configClient,
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.Name()]))
 				},
 				providerInventory: newInventoryClient(tt.fields.proxy, nil),
@@ -905,7 +905,7 @@ func Test_providerUpgrader_ApplyPlan(t *testing.T) {
 
 			u := &providerUpgrader{
 				configClient: configClient,
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
 				},
 				providerInventory: newInventoryClient(tt.fields.proxy, nil),
@@ -1046,7 +1046,7 @@ func Test_providerUpgrader_ApplyCustomPlan(t *testing.T) {
 
 			u := &providerUpgrader{
 				configClient: configClient,
-				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, options ...repository.Option) (repository.Client, error) {
+				repositoryClientFactory: func(ctx context.Context, provider config.Provider, configClient config.Client, _ ...repository.Option) (repository.Client, error) {
 					return repository.New(ctx, provider, configClient, repository.InjectRepository(tt.fields.repository[provider.ManifestLabel()]))
 				},
 				providerInventory: newInventoryClient(tt.fields.proxy, nil),
diff --git a/cmd/clusterctl/client/config/reader_viper_test.go b/cmd/clusterctl/client/config/reader_viper_test.go
index 15dfd6a9c090..b5f675416177 100644
--- a/cmd/clusterctl/client/config/reader_viper_test.go
+++ b/cmd/clusterctl/client/config/reader_viper_test.go
@@ -49,7 +49,7 @@ func Test_viperReader_Init(t *testing.T) {
 	g.Expect(os.WriteFile(configFileBadContents, []byte("bad-contents"), 0600)).To(Succeed())
 
 	// To test the remote config file
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		w.Header().Set("Content-Type", "text/plain")
 		_, err := w.Write([]byte("bar: bar"))
 		g.Expect(err).ToNot(HaveOccurred())
@@ -57,7 +57,7 @@ func Test_viperReader_Init(t *testing.T) {
 	defer ts.Close()
 
 	// To test the remote config file when fails to fetch
-	tsFail := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	tsFail := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(http.StatusNotFound)
 	}))
 	defer tsFail.Close()
diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go
index 765c491728b0..864b894f9277 100644
--- a/cmd/clusterctl/client/config_test.go
+++ b/cmd/clusterctl/client/config_test.go
@@ -884,7 +884,7 @@ func newFakeClientWithoutCluster(configClient config.Client) *fakeClient {
 	var err error
 	fake.internalClient, err = newClusterctlClient(context.Background(), "fake-config",
 		InjectConfig(fake.configClient),
-		InjectRepositoryFactory(func(ctx context.Context, input RepositoryClientFactoryInput) (repository.Client, error) {
+		InjectRepositoryFactory(func(_ context.Context, input RepositoryClientFactoryInput) (repository.Client, error) {
 			if _, ok := fake.repositories[input.Provider.ManifestLabel()]; !ok {
 				return nil, errors.Errorf("repository for kubeconfig %q does not exist", input.Provider.ManifestLabel())
 			}
@@ -1067,7 +1067,7 @@ v3: default3`,
 
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			config1 := newFakeConfig(ctx).
 				WithProvider(infraProviderConfig)
 			cluster1 := newFakeCluster(cluster.Kubeconfig{}, config1)
diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go
index 586ff0634310..45ea67f54161 100644
--- a/cmd/clusterctl/client/repository/repository_github.go
+++ b/cmd/clusterctl/client/repository/repository_github.go
@@ -406,7 +406,7 @@ func (g *gitHubRepository) httpGetFilesFromRelease(ctx context.Context, version,
 	downloadURL := fmt.Sprintf("https://github.com/%s/%s/releases/download/%s/%s", g.owner, g.repository, version, fileName)
 	var retryError error
 	var content []byte
-	_ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) {
+	_ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(context.Context) (bool, error) {
 		resp, err := http.Get(downloadURL) //nolint:gosec,noctx
 		if err != nil {
 			retryError = errors.Wrap(err, "error sending request")
diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go
index 36baebcb078f..95c3a3f784c6 100644
--- a/cmd/clusterctl/client/repository/repository_github_test.go
+++ b/cmd/clusterctl/client/repository/repository_github_test.go
@@ -549,7 +549,7 @@ func Test_gitHubRepository_getLatestRelease(t *testing.T) {
 	})
 
 	// Setup a handler for returning no releases.
-	muxGoproxy.HandleFunc("/github.com/o/r2/@v/list", func(w http.ResponseWriter, r *http.Request) {
+	muxGoproxy.HandleFunc("/github.com/o/r2/@v/list", func(_ http.ResponseWriter, r *http.Request) {
 		goproxytest.HTTPTestMethod(t, r, "GET")
 		// no releases
 	})
diff --git a/cmd/clusterctl/cmd/completion.go b/cmd/clusterctl/cmd/completion.go
index 935194a1dfa6..89ad896dd642 100644
--- a/cmd/clusterctl/cmd/completion.go
+++ b/cmd/clusterctl/cmd/completion.go
@@ -89,7 +89,7 @@ var (
 	Short:   "Output shell completion code for the specified shell (bash, zsh or fish)",
 	Long:    LongDesc(completionLong),
 	Example: completionExample,
-	Args: func(cmd *cobra.Command, args []string) error {
+	Args: func(_ *cobra.Command, args []string) error {
 		if len(args) != 1 {
 			return errors.New("please specify a shell")
 		}
@@ -164,7 +164,7 @@ func runCompletionZsh(out io.Writer, cmd *cobra.Command) error {
 }
 
 func contextCompletionFunc(kubeconfigFlag *pflag.Flag) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 		configClient, err := config.New(context.Background(), cfgFile)
 		if err != nil {
 			return completionError(err)
@@ -181,7 +181,7 @@ func contextCompletionFunc(kubeconfigFlag *pflag.Flag) func(cmd *cobra.Command,
 }
 
 func resourceNameCompletionFunc(kubeconfigFlag, contextFlag, namespaceFlag *pflag.Flag, groupVersion, kind string) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
-	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	return func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 		ctx := context.Background()
 
 		configClient, err := config.New(ctx, cfgFile)
diff --git a/cmd/clusterctl/cmd/config_repositories.go b/cmd/clusterctl/cmd/config_repositories.go
index 8e780c390fcd..1351e118cfb8 100644
--- a/cmd/clusterctl/cmd/config_repositories.go
+++ b/cmd/clusterctl/cmd/config_repositories.go
@@ -66,7 +66,7 @@ var configRepositoryCmd = &cobra.Command{
 		# Print the list of available providers in yaml format.
 		clusterctl config repositories -o yaml`),
 
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runGetRepositories(cfgFile, os.Stdout)
 	},
 }
diff --git a/cmd/clusterctl/cmd/delete.go b/cmd/clusterctl/cmd/delete.go
index 99e135a26910..97a6e0c9a0e6 100644
--- a/cmd/clusterctl/cmd/delete.go
+++ b/cmd/clusterctl/cmd/delete.go
@@ -84,7 +84,7 @@ var deleteCmd = &cobra.Command{
 		# are "orphaned" and thus there may be ongoing costs incurred as a result of this.
 		clusterctl delete --all --include-crd --include-namespace`),
 	Args: cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runDelete()
 	},
 }
diff --git a/cmd/clusterctl/cmd/describe_cluster.go b/cmd/clusterctl/cmd/describe_cluster.go
index fe11d2135697..7a7994376133 100644
--- a/cmd/clusterctl/cmd/describe_cluster.go
+++ b/cmd/clusterctl/cmd/describe_cluster.go
@@ -96,7 +96,7 @@ var describeClusterClusterCmd = &cobra.Command{
 		# also when their status is the same as the status of the corresponding machine object.
 		clusterctl describe cluster test-1 --echo`),
 
-	Args: func(cmd *cobra.Command, args []string) error {
+	Args: func(_ *cobra.Command, args []string) error {
 		if len(args) != 1 {
 			return errors.New("please specify a cluster name")
 		}
diff --git a/cmd/clusterctl/cmd/generate_cluster.go b/cmd/clusterctl/cmd/generate_cluster.go
index 976d6b9ee9db..7562a90380b8 100644
--- a/cmd/clusterctl/cmd/generate_cluster.go
+++ b/cmd/clusterctl/cmd/generate_cluster.go
@@ -92,7 +92,7 @@ var generateClusterClusterCmd = &cobra.Command{
 		# Prints the list of variables required by the yaml file for creating workload cluster.
 		clusterctl generate cluster my-cluster --list-variables`),
 
-	Args: func(cmd *cobra.Command, args []string) error {
+	Args: func(_ *cobra.Command, args []string) error {
 		if len(args) != 1 {
 			return errors.New("please specify a cluster name")
 		}
diff --git a/cmd/clusterctl/cmd/generate_provider.go b/cmd/clusterctl/cmd/generate_provider.go
index bd105d8d89bc..187507d353ac 100644
--- a/cmd/clusterctl/cmd/generate_provider.go
+++ b/cmd/clusterctl/cmd/generate_provider.go
@@ -74,7 +74,7 @@ var generateProviderCmd = &cobra.Command{
 		# No variables will be processed and substituted using this flag
 		clusterctl generate provider --infrastructure aws:v0.4.1 --raw`),
 
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runGenerateProviderComponents()
 	},
 }
diff --git a/cmd/clusterctl/cmd/generate_yaml.go b/cmd/clusterctl/cmd/generate_yaml.go
index ff978acbb491..5be1e06be2ed 100644
--- a/cmd/clusterctl/cmd/generate_yaml.go
+++ b/cmd/clusterctl/cmd/generate_yaml.go
@@ -62,7 +62,7 @@ var generateYamlCmd = &cobra.Command{
 		cat ~/workspace/cluster-template.yaml | clusterctl generate yaml --list-variables
 		`),
 
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return generateYAML(os.Stdin, os.Stdout)
 	},
 }
diff --git a/cmd/clusterctl/cmd/get_kubeconfig.go b/cmd/clusterctl/cmd/get_kubeconfig.go
index dfd35569ac3d..e580b97eb85b 100644
--- a/cmd/clusterctl/cmd/get_kubeconfig.go
+++ b/cmd/clusterctl/cmd/get_kubeconfig.go
@@ -48,13 +48,13 @@ var getKubeconfigCmd = &cobra.Command{
 		# Get the workload cluster's kubeconfig in a particular namespace.
 		clusterctl get kubeconfig --namespace foo`),
 
-	Args: func(cmd *cobra.Command, args []string) error {
+	Args: func(_ *cobra.Command, args []string) error {
 		if len(args) != 1 {
 			return errors.New("please specify a workload cluster name")
 		}
 		return nil
 	},
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(_ *cobra.Command, args []string) error {
 		return runGetKubeconfig(args[0])
 	},
 }
diff --git a/cmd/clusterctl/cmd/init.go b/cmd/clusterctl/cmd/init.go
index 6a00a5e7682b..e19eee9796da 100644
--- a/cmd/clusterctl/cmd/init.go
+++ b/cmd/clusterctl/cmd/init.go
@@ -84,7 +84,7 @@ var initCmd = &cobra.Command{
 		# Initialize a management cluster with a custom target namespace for the provider resources.
 		clusterctl init --infrastructure aws --target-namespace foo`),
 	Args: cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runInit()
 	},
 }
diff --git a/cmd/clusterctl/cmd/init_list_images.go b/cmd/clusterctl/cmd/init_list_images.go
index 9b198acff8f2..f8b6eb4e26ce 100644
--- a/cmd/clusterctl/cmd/init_list_images.go
+++ b/cmd/clusterctl/cmd/init_list_images.go
@@ -43,7 +43,7 @@ var initListImagesCmd = &cobra.Command{
 		clusterctl init list-images --infrastructure vcd --bootstrap kubeadm --control-plane nested --core cluster-api:v1.2.0
 		`),
 	Args: cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runInitListImages()
 	},
 }
diff --git a/cmd/clusterctl/cmd/move.go b/cmd/clusterctl/cmd/move.go
index c8316b3762f6..2a51bf2c0d9b 100644
--- a/cmd/clusterctl/cmd/move.go
+++ b/cmd/clusterctl/cmd/move.go
@@ -58,7 +58,7 @@ var moveCmd = &cobra.Command{
 		clusterctl move --from-directory /tmp/backup-directory
 		`),
 	Args: cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runMove()
 	},
 }
diff --git a/cmd/clusterctl/cmd/rollout/pause.go b/cmd/clusterctl/cmd/rollout/pause.go
index 7b921c747def..9c72bed80d41 100644
--- a/cmd/clusterctl/cmd/rollout/pause.go
+++ b/cmd/clusterctl/cmd/rollout/pause.go
@@ -58,7 +58,7 @@ func NewCmdRolloutPause(cfgFile string) *cobra.Command {
 		Short:   "Pause a cluster-api resource",
 		Long:    pauseLong,
 		Example: pauseExample,
-		RunE: func(cmd *cobra.Command, args []string) error {
+		RunE: func(_ *cobra.Command, args []string) error {
 			return runPause(cfgFile, args)
 		},
 	}
diff --git a/cmd/clusterctl/cmd/rollout/resume.go b/cmd/clusterctl/cmd/rollout/resume.go
index fec6af6e116d..393a8fc242a7 100644
--- a/cmd/clusterctl/cmd/rollout/resume.go
+++ b/cmd/clusterctl/cmd/rollout/resume.go
@@ -57,7 +57,7 @@ func NewCmdRolloutResume(cfgFile string) *cobra.Command {
 		Short:   "Resume a cluster-api resource",
 		Long:    resumeLong,
 		Example: resumeExample,
-		RunE: func(cmd *cobra.Command, args []string) error {
+		RunE: func(_ *cobra.Command, args []string) error {
 			return runResume(cfgFile, args)
 		},
 	}
diff --git a/cmd/clusterctl/cmd/rollout/undo.go b/cmd/clusterctl/cmd/rollout/undo.go
index 5e018207212b..aec6af75c580 100644
--- a/cmd/clusterctl/cmd/rollout/undo.go
+++ b/cmd/clusterctl/cmd/rollout/undo.go
@@ -56,7 +56,7 @@ func NewCmdRolloutUndo(cfgFile string) *cobra.Command {
 		Short:   "Undo a cluster-api resource",
 		Long:    undoLong,
 		Example: undoExample,
-		RunE: func(cmd *cobra.Command, args []string) error {
+		RunE: func(_ *cobra.Command, args []string) error {
 			return runUndo(cfgFile, args)
 		},
 	}
diff --git a/cmd/clusterctl/cmd/root.go b/cmd/clusterctl/cmd/root.go
index 55f8bdc52d92..6e54068fd813 100644
--- a/cmd/clusterctl/cmd/root.go
+++ b/cmd/clusterctl/cmd/root.go
@@ -59,7 +59,7 @@ var RootCmd = &cobra.Command{
 	Long: LongDesc(`
 		Get started with Cluster API using clusterctl to create a management cluster,
 		install providers, and create templates for your workload cluster.`),
-	PersistentPostRunE: func(cmd *cobra.Command, args []string) error {
+	PersistentPostRunE: func(*cobra.Command, []string) error {
 		ctx := context.Background()
 
 		// Check if clusterctl needs an upgrade "AFTER" running each command
diff --git a/cmd/clusterctl/cmd/topology_plan.go b/cmd/clusterctl/cmd/topology_plan.go
index 0fe62a1ab3dc..76fcf7a4601d 100644
--- a/cmd/clusterctl/cmd/topology_plan.go
+++ b/cmd/clusterctl/cmd/topology_plan.go
@@ -84,7 +84,7 @@ var topologyPlanCmd = &cobra.Command{
 		clusterctl alpha topology plan -f modified-template.yaml -o output/
 		`),
 	Args: cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runTopologyPlan()
 	},
 }
diff --git a/cmd/clusterctl/cmd/upgrade.go b/cmd/clusterctl/cmd/upgrade.go
index 2a4841fcc3af..7721d8a04c8e 100644
--- a/cmd/clusterctl/cmd/upgrade.go
+++ b/cmd/clusterctl/cmd/upgrade.go
@@ -29,7 +29,7 @@ var upgradeCmd = &cobra.Command{
 	GroupID: groupManagement,
 	Short:   "Upgrade core and provider components in a management cluster",
 	Args:    cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(cmd *cobra.Command, _ []string) error {
 		return cmd.Help()
 	},
 }
diff --git a/cmd/clusterctl/cmd/upgrade_apply.go b/cmd/clusterctl/cmd/upgrade_apply.go
index 137a10ef56fa..402df535405d 100644
--- a/cmd/clusterctl/cmd/upgrade_apply.go
+++ b/cmd/clusterctl/cmd/upgrade_apply.go
@@ -62,7 +62,7 @@ var upgradeApplyCmd = &cobra.Command{
 		# Upgrades only the aws provider to the v2.0.1 version.
 		clusterctl upgrade apply --infrastructure aws:v2.0.1`),
 	Args: cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runUpgradeApply()
 	},
 }
diff --git a/cmd/clusterctl/cmd/upgrade_plan.go b/cmd/clusterctl/cmd/upgrade_plan.go
index 52b05cf8ca79..ab1cb989ac4a 100644
--- a/cmd/clusterctl/cmd/upgrade_plan.go
+++ b/cmd/clusterctl/cmd/upgrade_plan.go
@@ -53,7 +53,7 @@ var upgradePlanCmd = &cobra.Command{
 		# Gets the recommended target versions for upgrading Cluster API providers.
 		clusterctl upgrade plan`),
 
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runUpgradePlan()
 	},
 }
diff --git a/cmd/clusterctl/cmd/util.go b/cmd/clusterctl/cmd/util.go
index 59e1d50e9535..01b65357fd44 100644
--- a/cmd/clusterctl/cmd/util.go
+++ b/cmd/clusterctl/cmd/util.go
@@ -63,7 +63,7 @@ func printVariablesOutput(template client.Template, options client.GetClusterTem
 		if variableMap[name] != nil {
 			v := *variableMap[name]
 			// Add quotes around any unquoted strings
-			if len(v) > 0 && !strings.HasPrefix(v, "\"") {
+			if v != "" && !strings.HasPrefix(v, "\"") {
 				v = fmt.Sprintf("%q", v)
 				variableMap[name] = &v
 			}
diff --git a/cmd/clusterctl/cmd/version.go b/cmd/clusterctl/cmd/version.go
index 1edf9e4c1805..52635ada126e 100644
--- a/cmd/clusterctl/cmd/version.go
+++ b/cmd/clusterctl/cmd/version.go
@@ -43,7 +43,7 @@ var versionCmd = &cobra.Command{
 	GroupID: groupOther,
 	Short:   "Print clusterctl version",
 	Args:    cobra.NoArgs,
-	RunE: func(cmd *cobra.Command, args []string) error {
+	RunE: func(*cobra.Command, []string) error {
 		return runVersion()
 	},
 }
diff --git a/cmd/clusterctl/cmd/version_checker_test.go b/cmd/clusterctl/cmd/version_checker_test.go
index ad5da76b7fa0..a237c232447e 100644
--- a/cmd/clusterctl/cmd/version_checker_test.go
+++ b/cmd/clusterctl/cmd/version_checker_test.go
@@ -340,7 +340,7 @@ func TestVersionChecker_ReadFromStateFile(t *testing.T) {
 	fakeGithubClient2, mux2, cleanup2 := test.NewFakeGitHub()
 	mux2.HandleFunc(
 		"/repos/kubernetes-sigs/cluster-api/releases/latest",
-		func(w http.ResponseWriter, r *http.Request) {
+		func(w http.ResponseWriter, _ *http.Request) {
 			githubCalled = true
 			fmt.Fprint(w, `{"tag_name": "v0.3.99", "html_url": "https://github.com/foo/bar/releases/v0.3.99"}`)
 		},
diff --git a/cmd/clusterctl/log/logger.go b/cmd/clusterctl/log/logger.go
index a64e2bb657cd..34435cdea22e 100644
--- a/cmd/clusterctl/log/logger.go
+++ b/cmd/clusterctl/log/logger.go
@@ -106,7 +106,7 @@ func (l *logger) V(level int) logr.LogSink {
 // WithName adds a new element to the logger's name.
 func (l *logger) WithName(name string) logr.LogSink {
 	nl := l.clone()
-	if len(l.prefix) > 0 {
+	if l.prefix != "" {
 		nl.prefix = l.prefix + "/"
 	}
 	nl.prefix += name
diff --git a/controlplane/kubeadm/internal/control_plane_test.go b/controlplane/kubeadm/internal/control_plane_test.go
index b68fba77d5c7..f87a5ab9dbbf 100644
--- a/controlplane/kubeadm/internal/control_plane_test.go
+++ b/controlplane/kubeadm/internal/control_plane_test.go
@@ -52,11 +52,11 @@ func TestControlPlane(t *testing.T) {
 		},
 	}
 
-	t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) {
+	t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(*testing.T) {
 		g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two"))
 	})
 
-	t.Run("With some machines in non defined failure domains", func(t *testing.T) {
+	t.Run("With some machines in non defined failure domains", func(*testing.T) {
 		controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown")))
 		g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown"))
 	})
diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go
index b2a4e490a583..aaf7c09a180b 100644
--- a/controlplane/kubeadm/internal/controllers/controller_test.go
+++ b/controlplane/kubeadm/internal/controllers/controller_test.go
@@ -818,7 +818,7 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) {
 
 	kcpOwner := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
 
-	t.Run("add KCP owner for secrets with no controller reference", func(t *testing.T) {
+	t.Run("add KCP owner for secrets with no controller reference", func(*testing.T) {
 		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
 		certificates := secret.Certificates{
 			{Purpose: secret.ClusterCA},
@@ -855,7 +855,7 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) {
 		}
 	})
 
-	t.Run("replace non-KCP controller with KCP controller reference", func(t *testing.T) {
+	t.Run("replace non-KCP controller with KCP controller reference", func(*testing.T) {
 		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
 		certificates := secret.Certificates{
 			{Purpose: secret.ClusterCA},
diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go
index 67eebb71719f..ffde4eb284f0 100644
--- a/controlplane/kubeadm/internal/controllers/helpers_test.go
+++ b/controlplane/kubeadm/internal/controllers/helpers_test.go
@@ -787,7 +787,7 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			fakeClient := newFakeClient(kcp, tt.configSecret)
 			r := &KubeadmControlPlaneReconciler{
 				Client: fakeClient,
diff --git a/controlplane/kubeadm/internal/controllers/suite_test.go b/controlplane/kubeadm/internal/controllers/suite_test.go
index 40e89d39a7b5..a0fc57112258 100644
--- a/controlplane/kubeadm/internal/controllers/suite_test.go
+++ b/controlplane/kubeadm/internal/controllers/suite_test.go
@@ -36,7 +36,7 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) {
+	setupReconcilers := func(_ context.Context, mgr ctrl.Manager) {
 		var err error
 		secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{
 			HTTPClient: mgr.GetHTTPClient(),
diff --git a/controlplane/kubeadm/internal/etcd_client_generator_test.go b/controlplane/kubeadm/internal/etcd_client_generator_test.go
index 394d0142e5f2..ed9703b4af9e 100644
--- a/controlplane/kubeadm/internal/etcd_client_generator_test.go
+++ b/controlplane/kubeadm/internal/etcd_client_generator_test.go
@@ -54,7 +54,7 @@ func TestFirstAvailableNode(t *testing.T) {
 		{
 			name:  "Returns client successfully",
 			nodes: []string{"node-1"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(_ context.Context, endpoint string) (*etcd.Client, error) {
 				return &etcd.Client{Endpoint: endpoint}, nil
 			},
 			expectedClient: etcd.Client{Endpoint: "etcd-node-1"},
@@ -68,7 +68,7 @@ func TestFirstAvailableNode(t *testing.T) {
 		{
 			name:  "Returns error from client",
 			nodes: []string{"node-1", "node-2"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(context.Context, string) (*etcd.Client, error) {
 				return nil, errors.New("something went wrong")
 			},
 			expectedErr: "could not establish a connection to any etcd node: something went wrong",
@@ -76,7 +76,7 @@ func TestFirstAvailableNode(t *testing.T) {
 		{
 			name:  "Returns client when some of the nodes are down but at least one node is up",
 			nodes: []string{"node-down-1", "node-down-2", "node-up"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(_ context.Context, endpoint string) (*etcd.Client, error) {
 				if strings.Contains(endpoint, "node-down") {
 					return nil, errors.New("node down")
 				}
@@ -117,7 +117,7 @@ func TestForLeader(t *testing.T) {
 		{
 			name:  "Returns client for leader successfully",
 			nodes: []string{"node-1", "node-leader"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(_ context.Context, endpoint string) (*etcd.Client, error) {
 				return &etcd.Client{
 					Endpoint: endpoint,
 					LeaderID: 1729,
@@ -146,7 +146,7 @@ func TestForLeader(t *testing.T) {
 		{
 			name:  "Returns client for leader even when one or more nodes are down",
 			nodes: []string{"node-down-1", "node-down-2", "node-leader"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(_ context.Context, endpoint string) (*etcd.Client, error) {
 				if strings.Contains(endpoint, "node-down") {
 					return nil, errors.New("node down")
 				}
@@ -182,7 +182,7 @@ func TestForLeader(t *testing.T) {
 		{
 			name:  "Returns error when the leader does not have a corresponding node",
 			nodes: []string{"node-1"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(_ context.Context, endpoint string) (*etcd.Client, error) {
 				return &etcd.Client{
 					Endpoint: endpoint,
 					LeaderID: 1729,
@@ -201,7 +201,7 @@ func TestForLeader(t *testing.T) {
 		{
 			name:  "Returns error when all nodes are down",
 			nodes: []string{"node-down-1", "node-down-2", "node-down-3"},
-			cc: func(ctx context.Context, endpoint string) (*etcd.Client, error) {
+			cc: func(context.Context, string) (*etcd.Client, error) {
 				return nil, errors.New("node down")
 			},
 			expectedErr: "could not establish a connection to the etcd leader: [could not establish a connection to any etcd node: node down, failed to connect to etcd node]",
diff --git a/controlplane/kubeadm/internal/suite_test.go b/controlplane/kubeadm/internal/suite_test.go
index 7bdd48b5ae72..65e3c9a61ff1 100644
--- a/controlplane/kubeadm/internal/suite_test.go
+++ b/controlplane/kubeadm/internal/suite_test.go
@@ -36,7 +36,7 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) {
+	setupReconcilers := func(_ context.Context, mgr ctrl.Manager) {
 		var err error
 		secretCachingClient, err = client.New(mgr.GetConfig(), client.Options{
 			HTTPClient: mgr.GetHTTPClient(),
diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go
index 8a29aaf5282a..bb15b6d99da2 100644
--- a/controlplane/kubeadm/internal/workload_cluster_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_test.go
@@ -583,7 +583,7 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) {
 				`),
 				},
 			}},
-			mutator: func(c *bootstrapv1.ClusterConfiguration) {},
+			mutator: func(*bootstrapv1.ClusterConfiguration) {},
 			wantConfigMap: &corev1.ConfigMap{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: kubeadmConfigKey,
@@ -764,7 +764,7 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) {
 				`),
 				},
 			}},
-			mutator: func(status *bootstrapv1.ClusterStatus) {},
+			mutator: func(*bootstrapv1.ClusterStatus) {},
 			wantConfigMap: &corev1.ConfigMap{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: kubeadmConfigKey,
diff --git a/exp/addons/internal/controllers/predicates/resource_predicates.go b/exp/addons/internal/controllers/predicates/resource_predicates.go
index bc12a340a409..7ad3322ec2c7 100644
--- a/exp/addons/internal/controllers/predicates/resource_predicates.go
+++ b/exp/addons/internal/controllers/predicates/resource_predicates.go
@@ -26,9 +26,9 @@ import (
 // ResourceCreateOrUpdate returns a predicate that returns true for create and update events.
 func ResourceCreateOrUpdate(_ logr.Logger) predicate.Funcs {
 	return predicate.Funcs{
-		CreateFunc:  func(e event.CreateEvent) bool { return true },
-		UpdateFunc:  func(e event.UpdateEvent) bool { return true },
-		DeleteFunc:  func(e event.DeleteEvent) bool { return false },
-		GenericFunc: func(e event.GenericEvent) bool { return false },
+		CreateFunc:  func(event.CreateEvent) bool { return true },
+		UpdateFunc:  func(event.UpdateEvent) bool { return true },
+		DeleteFunc:  func(event.DeleteEvent) bool { return false },
+		GenericFunc: func(event.GenericEvent) bool { return false },
 	}
 }
diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go
index f71f7d57d97f..9e24b836e958 100644
--- a/exp/internal/controllers/machinepool_controller_phases_test.go
+++ b/exp/internal/controllers/machinepool_controller_phases_test.go
@@ -1248,7 +1248,7 @@ func TestReconcileMachinePoolMachines(t *testing.T) {
 	cluster := builder.Cluster(ns.Name, clusterName).Build()
 	g.Expect(env.Create(ctx, cluster)).To(Succeed())
 
-	t.Run("Should do nothing if machines already exist", func(t *testing.T) {
+	t.Run("Should do nothing if machines already exist", func(*testing.T) {
 		machinePool := getMachinePool(2, "machinepool-test-1", clusterName, ns.Name)
 		g.Expect(env.Create(ctx, &machinePool)).To(Succeed())
 
@@ -1314,7 +1314,7 @@ func TestReconcileMachinePoolMachines(t *testing.T) {
 		}
 	})
 
-	t.Run("Should create two machines if two infra machines exist", func(t *testing.T) {
+	t.Run("Should create two machines if two infra machines exist", func(*testing.T) {
 		machinePool := getMachinePool(2, "machinepool-test-2", clusterName, ns.Name)
 		g.Expect(env.Create(ctx, &machinePool)).To(Succeed())
 
@@ -1375,7 +1375,7 @@ func TestReconcileMachinePoolMachines(t *testing.T) {
 		}
 	})
 
-	t.Run("Should do nothing if machinepool does not support machinepool machines", func(t *testing.T) {
+	t.Run("Should do nothing if machinepool does not support machinepool machines", func(*testing.T) {
 		machinePool := getMachinePool(2, "machinepool-test-3", clusterName, ns.Name)
 		g.Expect(env.Create(ctx, &machinePool)).To(Succeed())
 
diff --git a/exp/internal/controllers/machinepool_controller_test.go b/exp/internal/controllers/machinepool_controller_test.go
index 3fad26eaf901..731ad55b0200 100644
--- a/exp/internal/controllers/machinepool_controller_test.go
+++ b/exp/internal/controllers/machinepool_controller_test.go
@@ -709,7 +709,7 @@ func TestMachinePoolConditions(t *testing.T) {
 			name:                "all conditions true",
 			bootstrapReady:      true,
 			infrastructureReady: true,
-			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, mp *expv1.MachinePool, nodeList *corev1.NodeList) {
+			beforeFunc: func(_, _ *unstructured.Unstructured, mp *expv1.MachinePool, _ *corev1.NodeList) {
 				mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
 				mp.Status = expv1.MachinePoolStatus{
 					NodeRefs: []corev1.ObjectReference{
@@ -734,7 +734,7 @@ func TestMachinePoolConditions(t *testing.T) {
 			name:                "boostrap not ready",
 			bootstrapReady:      false,
 			infrastructureReady: true,
-			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, mp *expv1.MachinePool, nodeList *corev1.NodeList) {
+			beforeFunc: func(bootstrap, _ *unstructured.Unstructured, _ *expv1.MachinePool, _ *corev1.NodeList) {
 				addConditionsToExternal(bootstrap, clusterv1.Conditions{
 					{
 						Type:   clusterv1.ReadyCondition,
@@ -775,7 +775,7 @@ func TestMachinePoolConditions(t *testing.T) {
 			name:                "infrastructure not ready",
 			bootstrapReady:      true,
 			infrastructureReady: false,
-			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, mp *expv1.MachinePool, nodeList *corev1.NodeList) {
+			beforeFunc: func(_, infra *unstructured.Unstructured, _ *expv1.MachinePool, _ *corev1.NodeList) {
 				addConditionsToExternal(infra, clusterv1.Conditions{
 					{
 						Type:   clusterv1.ReadyCondition,
@@ -817,7 +817,7 @@ func TestMachinePoolConditions(t *testing.T) {
 			name:           "incorrect infrastructure reference",
 			bootstrapReady: true,
 			expectError:    true,
-			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, mp *expv1.MachinePool, nodeList *corev1.NodeList) {
+			beforeFunc: func(_, _ *unstructured.Unstructured, mp *expv1.MachinePool, _ *corev1.NodeList) {
 				mp.Spec.Template.Spec.InfrastructureRef = corev1.ObjectReference{
 					APIVersion: builder.InfrastructureGroupVersion.String(),
 					Kind:       builder.TestInfrastructureMachineTemplateKind,
diff --git a/exp/ipam/internal/webhooks/ipaddress_test.go b/exp/ipam/internal/webhooks/ipaddress_test.go
index 909c7721d8d9..081df1d2cbf4 100644
--- a/exp/ipam/internal/webhooks/ipaddress_test.go
+++ b/exp/ipam/internal/webhooks/ipaddress_test.go
@@ -80,13 +80,13 @@ func TestIPAddressValidateCreate(t *testing.T) {
 	}{
 		{
 			name:      "a valid IPv4 Address should be accepted",
-			ip:        getAddress(false, func(addr *ipamv1.IPAddress) {}),
+			ip:        getAddress(false, func(*ipamv1.IPAddress) {}),
 			extraObjs: []client.Object{claim},
 			expectErr: false,
 		},
 		{
 			name:      "a valid IPv6 Address should be accepted",
-			ip:        getAddress(true, func(addr *ipamv1.IPAddress) {}),
+			ip:        getAddress(true, func(*ipamv1.IPAddress) {}),
 			extraObjs: []client.Object{claim},
 			expectErr: false,
 		},
@@ -201,13 +201,13 @@ func TestIPAddressValidateUpdate(t *testing.T) {
 	}{
 		{
 			name:      "should accept objects with identical spec",
-			oldIP:     getAddress(func(addr *ipamv1.IPAddress) {}),
-			newIP:     getAddress(func(addr *ipamv1.IPAddress) {}),
+			oldIP:     getAddress(func(*ipamv1.IPAddress) {}),
+			newIP:     getAddress(func(*ipamv1.IPAddress) {}),
 			expectErr: false,
 		},
 		{
 			name:  "should reject objects with different spec",
-			oldIP: getAddress(func(addr *ipamv1.IPAddress) {}),
+			oldIP: getAddress(func(*ipamv1.IPAddress) {}),
 			newIP: getAddress(func(addr *ipamv1.IPAddress) {
 				addr.Spec.Address = "10.0.0.2"
 			}),
diff --git a/exp/ipam/internal/webhooks/ipaddressclaim_test.go b/exp/ipam/internal/webhooks/ipaddressclaim_test.go
index 2f9f5025a6d0..a65db582d428 100644
--- a/exp/ipam/internal/webhooks/ipaddressclaim_test.go
+++ b/exp/ipam/internal/webhooks/ipaddressclaim_test.go
@@ -49,7 +49,7 @@ func TestIPAddressClaimValidateCreate(t *testing.T) {
 	}{
 		{
 			name:      "should accept a valid claim",
-			claim:     getClaim(func(addr *ipamv1.IPAddressClaim) {}),
+			claim:     getClaim(func(*ipamv1.IPAddressClaim) {}),
 			expectErr: false,
 		},
 		{
@@ -98,13 +98,13 @@ func TestIPAddressClaimValidateUpdate(t *testing.T) {
 	}{
 		{
 			name:      "should accept objects with identical spec",
-			oldClaim:  getClaim(func(addr *ipamv1.IPAddressClaim) {}),
-			newClaim:  getClaim(func(addr *ipamv1.IPAddressClaim) {}),
+			oldClaim:  getClaim(func(*ipamv1.IPAddressClaim) {}),
+			newClaim:  getClaim(func(*ipamv1.IPAddressClaim) {}),
 			expectErr: false,
 		},
 		{
 			name:     "should reject objects with different spec",
-			oldClaim: getClaim(func(addr *ipamv1.IPAddressClaim) {}),
+			oldClaim: getClaim(func(*ipamv1.IPAddressClaim) {}),
 			newClaim: getClaim(func(addr *ipamv1.IPAddressClaim) {
 				addr.Spec.PoolRef.Name = "different"
 			}),
diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go
index 49c8cccd483f..2026e1cba300 100644
--- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go
+++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go
@@ -89,7 +89,7 @@ func TestExtensionReconciler_Reconcile(t *testing.T) {
 	defer func() {
 		g.Expect(env.CleanupAndWait(ctx, extensionConfig)).To(Succeed())
 	}()
-	t.Run("fail reconcile if registry has not been warmed up", func(t *testing.T) {
+	t.Run("fail reconcile if registry has not been warmed up", func(*testing.T) {
 		// Attempt to reconcile. This will be an error as the registry has not been warmed up at this point.
 		res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(extensionConfig)})
 		g.Expect(err).ToNot(HaveOccurred())
@@ -97,7 +97,7 @@ func TestExtensionReconciler_Reconcile(t *testing.T) {
 		g.Expect(res.Requeue).To(BeTrue())
 	})
 
-	t.Run("successful reconcile and discovery on ExtensionConfig create", func(t *testing.T) {
+	t.Run("successful reconcile and discovery on ExtensionConfig create", func(*testing.T) {
 		// Warm up the registry before trying reconciliation again.
 		warmup := &warmupRunnable{
 			Client: env.GetClient(),
@@ -132,7 +132,7 @@ func TestExtensionReconciler_Reconcile(t *testing.T) {
 		g.Expect(err).ToNot(HaveOccurred())
 	})
 
-	t.Run("Successful reconcile and discovery on Extension update", func(t *testing.T) {
+	t.Run("Successful reconcile and discovery on Extension update", func(*testing.T) {
 		// Start a new ExtensionServer where the second handler is removed.
 		updatedServer, err := fakeSecureExtensionServer(discoveryHandler("first", "third"))
 		g.Expect(err).ToNot(HaveOccurred())
@@ -185,7 +185,7 @@ func TestExtensionReconciler_Reconcile(t *testing.T) {
 		_, err = registry.Get("second.ext1")
 		g.Expect(err).To(HaveOccurred())
 	})
-	t.Run("Successful reconcile and deregister on ExtensionConfig delete", func(t *testing.T) {
+	t.Run("Successful reconcile and deregister on ExtensionConfig delete", func(*testing.T) {
 		g.Expect(env.CleanupAndWait(ctx, extensionConfig)).To(Succeed())
 		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(extensionConfig)})
 		g.Expect(env.Get(ctx, util.ObjectKey(extensionConfig), extensionConfig)).To(Not(Succeed()))
@@ -203,7 +203,7 @@ func TestExtensionReconciler_discoverExtensionConfig(t *testing.T) {
 	ns, err := env.CreateNamespace(ctx, "test-runtime-extension")
 	g.Expect(err).ToNot(HaveOccurred())
 
-	t.Run("test discovery of a single extension", func(t *testing.T) {
+	t.Run("test discovery of a single extension", func(*testing.T) {
 		cat := runtimecatalog.New()
 		g.Expect(fakev1alpha1.AddToCatalog(cat)).To(Succeed())
 
@@ -237,7 +237,7 @@ func TestExtensionReconciler_discoverExtensionConfig(t *testing.T) {
 		g.Expect(conditions[0].Status).To(Equal(corev1.ConditionTrue))
 		g.Expect(conditions[0].Type).To(Equal(runtimev1.RuntimeExtensionDiscoveredCondition))
 	})
-	t.Run("fail discovery for non-running extension", func(t *testing.T) {
+	t.Run("fail discovery for non-running extension", func(*testing.T) {
 		cat := runtimecatalog.New()
 		g.Expect(fakev1alpha1.AddToCatalog(cat)).To(Succeed())
 		registry := runtimeregistry.New()
@@ -359,7 +359,7 @@ func discoveryHandler(handlerList ...string) func(http.ResponseWriter, *http.Req
 		panic(err)
 	}
 
-	return func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(http.StatusOK)
 		_, _ = w.Write(respBody)
 	}
diff --git a/exp/runtime/topologymutation/variables_test.go b/exp/runtime/topologymutation/variables_test.go
index 844a3afaa3e7..eb1920258569 100644
--- a/exp/runtime/topologymutation/variables_test.go
+++ b/exp/runtime/topologymutation/variables_test.go
@@ -64,7 +64,7 @@ func Test_GetRawTemplateVariable(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			value, err := GetVariable(tt.variables, tt.variableName)
 
 			g.Expect(value).To(BeComparableTo(tt.expectedValue))
@@ -121,7 +121,7 @@ func Test_GetStringTemplateVariable(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			value, err := GetStringVariable(tt.variables, tt.variableName)
 
 			g.Expect(value).To(Equal(tt.expectedValue))
@@ -178,7 +178,7 @@ func Test_GetBoolVariable(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			value, err := GetBoolVariable(tt.variables, tt.variableName)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
@@ -266,7 +266,7 @@ func Test_GetVariableObjectWithNestedType(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			err := GetObjectVariableInto(tt.variables, tt.variableName, tt.object)
 			if tt.expectedErr {
 				g.Expect(err).To(HaveOccurred())
diff --git a/exp/runtime/topologymutation/walker_test.go b/exp/runtime/topologymutation/walker_test.go
index 52c0e60fb7b5..996ed03ef3e2 100644
--- a/exp/runtime/topologymutation/walker_test.go
+++ b/exp/runtime/topologymutation/walker_test.go
@@ -51,7 +51,7 @@ func Test_WalkTemplates(t *testing.T) {
 		controlplanev1.GroupVersion,
 		bootstrapv1.GroupVersion,
 	)
-	mutatingFunc := func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error {
+	mutatingFunc := func(_ context.Context, obj runtime.Object, _ map[string]apiextensionsv1.JSON, _ runtimehooksv1.HolderReference) error {
 		switch obj := obj.(type) {
 		case *controlplanev1.KubeadmControlPlaneTemplate:
 			obj.Annotations = map[string]string{"a": "a"}
@@ -218,7 +218,7 @@ func Test_WalkTemplates(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+		t.Run(tt.name, func(*testing.T) {
 			response := &runtimehooksv1.GeneratePatchesResponse{}
 			request := &runtimehooksv1.GeneratePatchesRequest{Variables: tt.globalVariables, Items: tt.requestItems}
 
diff --git a/exp/util/util_test.go b/exp/util/util_test.go
index cc29fc5855f0..bde9a221c5d1 100644
--- a/exp/util/util_test.go
+++ b/exp/util/util_test.go
@@ -131,7 +131,7 @@ func TestGetMachinePoolByLabels(t *testing.T) {
 	for _, tc := range testcases {
 		tc := tc
 
-		t.Run(tc.name, func(t *testing.T) {
+		t.Run(tc.name, func(*testing.T) {
 			clientFake := fake.NewClientBuilder().
 				WithScheme(fakeScheme).
 				WithObjects(
diff --git a/hack/tools/internal/log-push/main.go b/hack/tools/internal/log-push/main.go
index bc9caaa9adc5..bdf66621f3f3 100644
--- a/hack/tools/internal/log-push/main.go
+++ b/hack/tools/internal/log-push/main.go
@@ -122,7 +122,7 @@ func getLogsFromFile(logPath string, logFileRegex *regexp.Regexp) (map[string]Lo
 	klog.Infof("Getting logs from %s", logPath)
 
 	logData := map[string]LogData{}
-	err := filepath.Walk(logPath, func(fileName string, info fs.FileInfo, err error) error {
+	err := filepath.Walk(logPath, func(fileName string, _ fs.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
diff --git a/hack/tools/internal/tilt-prepare/main.go b/hack/tools/internal/tilt-prepare/main.go
index 07b34a85f3c4..c1366ea8c8ff 100644
--- a/hack/tools/internal/tilt-prepare/main.go
+++ b/hack/tools/internal/tilt-prepare/main.go
@@ -585,8 +585,8 @@ type chartFile struct {
 // outdated charts below the given path. This is necessary because kustomize just
 // uses a local Chart if it exists, no matter if it matches the required version or not.
 func cleanupChartTask(path string) taskFunction {
-	return func(ctx context.Context, prefix string, errCh chan error) {
-		err := filepath.WalkDir(path, func(path string, d fs.DirEntry, _ error) error {
+	return func(_ context.Context, prefix string, errCh chan error) {
+		err := filepath.WalkDir(path, func(path string, _ fs.DirEntry, _ error) error {
 			if !strings.HasSuffix(path, "kustomization.yaml") {
 				return nil
 			}
diff --git a/hack/tools/mdbook/embed/embed.go b/hack/tools/mdbook/embed/embed.go
index 964668a12f8f..a4d1d5d38dd9 100644
--- a/hack/tools/mdbook/embed/embed.go
+++ b/hack/tools/mdbook/embed/embed.go
@@ -41,7 +41,7 @@ func (Embed) SupportsOutput(_ string) bool { return true }
 
 // Process modifies the book in the input, which gets returned as the result of the plugin.
 func (l Embed) Process(input *plugin.Input) error {
-	return plugin.EachCommand(&input.Book, "embed-github", func(chapter *plugin.BookChapter, args string) (string, error) {
+	return plugin.EachCommand(&input.Book, "embed-github", func(_ *plugin.BookChapter, args string) (string, error) {
 		tags := reflect.StructTag(strings.TrimSpace(args))
 
 		repository := tags.Get("repo")
diff --git a/hack/tools/mdbook/releaselink/releaselink.go b/hack/tools/mdbook/releaselink/releaselink.go
index e3d5f6bae551..c6895e03aa29 100644
--- a/hack/tools/mdbook/releaselink/releaselink.go
+++ b/hack/tools/mdbook/releaselink/releaselink.go
@@ -47,7 +47,7 @@ func (ReleaseLink) SupportsOutput(_ string) bool { return true }
 
 // Process modifies the book in the input, which gets returned as the result of the plugin.
 func (l ReleaseLink) Process(input *plugin.Input) error {
-	return plugin.EachCommand(&input.Book, "releaselink", func(chapter *plugin.BookChapter, args string) (string, error) {
+	return plugin.EachCommand(&input.Book, "releaselink", func(_ *plugin.BookChapter, args string) (string, error) {
 		var gomodule, asset, repo string
 		var found bool
 
diff --git a/hack/tools/mdbook/tabulate/tabulate.go b/hack/tools/mdbook/tabulate/tabulate.go
index 16d83cb579da..583198bb9380 100644
--- a/hack/tools/mdbook/tabulate/tabulate.go
+++ b/hack/tools/mdbook/tabulate/tabulate.go
@@ -38,7 +38,7 @@ func (Tabulate) SupportsOutput(_ string) bool { return true }
 
 // Process modifies the book in the input, which gets returned as the result of the plugin.
func (l Tabulate) Process(input *plugin.Input) error { - if err := plugin.EachCommand(&input.Book, "tabs", func(chapter *plugin.BookChapter, args string) (string, error) { + if err := plugin.EachCommand(&input.Book, "tabs", func(_ *plugin.BookChapter, args string) (string, error) { var bld strings.Builder tags := reflect.StructTag(strings.TrimSpace(args)) groupName := tags.Get("name") @@ -63,19 +63,19 @@ func (l Tabulate) Process(input *plugin.Input) error { return err } - if err := plugin.EachCommand(&input.Book, "tab", func(chapter *plugin.BookChapter, name string) (string, error) { + if err := plugin.EachCommand(&input.Book, "tab", func(_ *plugin.BookChapter, name string) (string, error) { return fmt.Sprintf(`
`, name), nil }); err != nil { return err } - if err := plugin.EachCommand(&input.Book, "/tab", func(chapter *plugin.BookChapter, args string) (string, error) { + if err := plugin.EachCommand(&input.Book, "/tab", func(*plugin.BookChapter, string) (string, error) { return "
", nil }); err != nil { return err } - return plugin.EachCommand(&input.Book, "/tabs", func(chapter *plugin.BookChapter, args string) (string, error) { + return plugin.EachCommand(&input.Book, "/tabs", func(*plugin.BookChapter, string) (string, error) { return "", nil }) } diff --git a/internal/controllers/clusterclass/clusterclass_controller_test.go b/internal/controllers/clusterclass/clusterclass_controller_test.go index 6301b56eeeae..9db48c0405c5 100644 --- a/internal/controllers/clusterclass/clusterclass_controller_test.go +++ b/internal/controllers/clusterclass/clusterclass_controller_test.go @@ -526,7 +526,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). WithCallExtensionResponses( map[string]runtimehooksv1.ResponseObject{ diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 7f73968fc96f..56c79b22d702 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -855,7 +855,7 @@ func TestMachineConditions(t *testing.T) { name: "all conditions true", infraReady: true, bootstrapReady: true, - beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) { + beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { // since these conditions are set by an external controller conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedCondition) @@ -872,7 +872,7 @@ func TestMachineConditions(t *testing.T) { name: "infra condition consumes reason from the infra config", infraReady: false, bootstrapReady: true, - beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) { + beforeFunc: func(_, infra *unstructured.Unstructured, _ *clusterv1.Machine) { addConditionsToExternal(infra, clusterv1.Conditions{ { Type: clusterv1.ReadyCondition, @@ -899,7 +899,7 @@ func TestMachineConditions(t *testing.T) { name: "bootstrap condition consumes reason from the bootstrap config", infraReady: true, bootstrapReady: false, - beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) { + beforeFunc: func(bootstrap, _ *unstructured.Unstructured, _ *clusterv1.Machine) { addConditionsToExternal(bootstrap, clusterv1.Conditions{ { Type: clusterv1.ReadyCondition, @@ -936,7 +936,7 @@ func TestMachineConditions(t *testing.T) { name: "ready condition summary consumes reason from the machine owner remediated condition", infraReady: true, bootstrapReady: true, - beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) { + beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed") }, conditionsToAssert: []*clusterv1.Condition{ @@ -947,7 +947,7 @@ func TestMachineConditions(t *testing.T) { name: "ready condition summary consumes reason from the MHC succeeded condition", infraReady: true, bootstrapReady: true, - beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) { + beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, 
clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") }, conditionsToAssert: []*clusterv1.Condition{ @@ -978,7 +978,7 @@ func TestMachineConditions(t *testing.T) { name: "ready condition summary consumes reason from the draining succeeded condition", infraReady: true, bootstrapReady: true, - beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) { + beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, "") }, conditionsToAssert: []*clusterv1.Condition{ @@ -2204,7 +2204,7 @@ func TestNodeDeletion(t *testing.T) { } for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { m := testMachine.DeepCopy() m.Spec.NodeDeletionTimeout = tc.deletionTimeout diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 8c42a8eab843..09c42c0f4f83 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -114,7 +114,7 @@ func (r *Reconciler) getNewMachineSet(ctx context.Context, md *clusterv1.Machine } // Ensure MachineDeployment has the latest MachineSet revision in its revision annotation. - err = r.updateMachineDeployment(ctx, md, func(innerDeployment *clusterv1.MachineDeployment) { + err = r.updateMachineDeployment(ctx, md, func(*clusterv1.MachineDeployment) { mdutil.SetDeploymentRevision(md, updatedMS.Annotations[clusterv1.RevisionAnnotation]) }) if err != nil { @@ -134,7 +134,7 @@ func (r *Reconciler) getNewMachineSet(ctx context.Context, md *clusterv1.Machine } // Ensure MachineDeployment has the latest MachineSet revision in its revision annotation. - err = r.updateMachineDeployment(ctx, md, func(innerDeployment *clusterv1.MachineDeployment) { + err = r.updateMachineDeployment(ctx, md, func(*clusterv1.MachineDeployment) { mdutil.SetDeploymentRevision(md, newMS.Annotations[clusterv1.RevisionAnnotation]) }) if err != nil { diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index af564c089817..afa3b635fe77 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -434,7 +434,7 @@ func serverSideApplyPatchHelperFactory(c client.Client, ssaCache ssa.Cache) stru // dryRunPatchHelperFactory makes use of a two-ways patch and is used in situations where we cannot rely on managed fields. func dryRunPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc { - return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) { + return func(_ context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) { return structuredmerge.NewTwoWaysPatchHelper(original, modified, c, opts...) 
} } diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index 1f6ce841339e..3f2013b810f3 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -1420,7 +1420,7 @@ func TestReconciler_DefaultCluster(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { initObjects := []client.Object{tt.initialCluster, tt.clusterClass} fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(initObjects...).Build() r := &Reconciler{ @@ -1513,7 +1513,7 @@ func TestReconciler_ValidateCluster(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { initObjects := []client.Object{tt.cluster, tt.clusterClass} fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(initObjects...).Build() r := &Reconciler{ diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go index 34a7b52a24f9..3e6542d6d062 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/internal/controllers/topology/cluster/desired_state.go @@ -1274,7 +1274,7 @@ func templateToObject(in templateToInput) (*unstructured.Unstructured, error) { return nil, errors.Wrapf(err, "failed to generate name for %s", object.GetKind()) } object.SetName(name) - if in.currentObjectRef != nil && len(in.currentObjectRef.Name) > 0 { + if in.currentObjectRef != nil && in.currentObjectRef.Name != "" { object.SetName(in.currentObjectRef.Name) } @@ -1338,7 +1338,7 @@ func templateToTemplate(in templateToInput) (*unstructured.Unstructured, error) return nil, errors.Wrapf(err, "failed to generate name for %s", template.GetKind()) } template.SetName(name) - if in.currentObjectRef != nil && len(in.currentObjectRef.Name) > 0 { + if in.currentObjectRef != nil && in.currentObjectRef.Name != "" { template.SetName(in.currentObjectRef.Name) } diff --git a/internal/controllers/topology/cluster/scope/state_test.go b/internal/controllers/topology/cluster/scope/state_test.go index 40a7a9d97a6c..b7cdbfb0f9cd 100644 --- a/internal/controllers/topology/cluster/scope/state_test.go +++ b/internal/controllers/topology/cluster/scope/state_test.go @@ -122,7 +122,7 @@ func TestUpgrading(t *testing.T) { ctx := context.Background() - t.Run("should return the names of the upgrading MachineDeployments", func(t *testing.T) { + t.Run("should return the names of the upgrading MachineDeployments", func(*testing.T) { stableMD := builder.MachineDeployment("ns", "stableMD"). WithClusterName("cluster1"). WithVersion("v1.2.3"). 
diff --git a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go index 1a234f49d59e..65fb42436f1b 100644 --- a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go +++ b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go @@ -484,7 +484,7 @@ func TestServerSideApply(t *testing.T) { _, err := NewServerSidePatchHelper(ctx, original, modified, env.GetClient(), ssa.NewCache()) g.Expect(err).To(HaveOccurred()) }) - t.Run("Error on object which does not exist (anymore) but was expected to get updated", func(t *testing.T) { + t.Run("Error on object which does not exist (anymore) but was expected to get updated", func(*testing.T) { original := builder.TestInfrastructureCluster(ns.Name, "obj3").WithSpecFields(map[string]interface{}{ "spec.controlPlaneEndpoint.host": "1.2.3.4", "spec.controlPlaneEndpoint.port": int64(1234), diff --git a/internal/goproxy/goproxy.go b/internal/goproxy/goproxy.go index 3448de0fa3b1..1d2f3bb5e9ac 100644 --- a/internal/goproxy/goproxy.go +++ b/internal/goproxy/goproxy.go @@ -81,7 +81,7 @@ func (g *Client) GetVersions(ctx context.Context, gomodulePath string) (semver.V var rawResponse []byte var responseStatusCode int var retryError error - _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { + _ = wait.PollUntilContextTimeout(ctx, retryableOperationInterval, retryableOperationTimeout, true, func(context.Context) (bool, error) { retryError = nil resp, err := http.DefaultClient.Do(req) diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go index 4a9ceb165274..cc76d8532257 100644 --- a/internal/runtime/client/client_test.go +++ b/internal/runtime/client/client_test.go @@ -182,7 +182,7 @@ func TestClient_httpCall(t *testing.T) { }, } for _, tt := range tableTests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { // a http server is only required if we have a valid catalog, otherwise httpCall will not reach out to the server if tt.opts != nil && tt.opts.catalog != nil { // create http server with fakeHookHandler diff --git a/internal/runtime/test/v1alpha1/conversion_test.go b/internal/runtime/test/v1alpha1/conversion_test.go index 289bc00b2851..aafd597c2210 100644 --- a/internal/runtime/test/v1alpha1/conversion_test.go +++ b/internal/runtime/test/v1alpha1/conversion_test.go @@ -35,7 +35,7 @@ func TestConversion(t *testing.T) { _ = AddToCatalog(c) _ = v1alpha2.AddToCatalog(c) - t.Run("down-convert FakeRequest v1alpha2 to v1alpha1", func(t *testing.T) { + t.Run("down-convert FakeRequest v1alpha2 to v1alpha1", func(*testing.T) { request := &v1alpha2.FakeRequest{Cluster: clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{ Name: "test", }}} @@ -45,7 +45,7 @@ func TestConversion(t *testing.T) { g.Expect(requestLocal.Cluster.GetName()).To(Equal(request.Cluster.Name)) }) - t.Run("up-convert FakeResponse v1alpha1 to v1alpha2", func(t *testing.T) { + t.Run("up-convert FakeResponse v1alpha1 to v1alpha2", func(*testing.T) { responseLocal := &FakeResponse{ First: 1, Second: "foo", diff --git a/internal/topology/check/compatibility_test.go b/internal/topology/check/compatibility_test.go index 4db1c1aeb624..cd9b19eafb34 100644 --- a/internal/topology/check/compatibility_test.go +++ b/internal/topology/check/compatibility_test.go @@ -591,7 +591,7 @@ func 
TestClusterClassesAreCompatible(t *testing.T) { } for _, tt := range tests { g := NewWithT(t) - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { allErrs := ClusterClassesAreCompatible(tt.current, tt.desired) if tt.wantErr { g.Expect(allErrs).ToNot(BeEmpty()) diff --git a/internal/topology/variables/cluster_variable_defaulting_test.go b/internal/topology/variables/cluster_variable_defaulting_test.go index b6a7e72e55a4..a874fb0a55a2 100644 --- a/internal/topology/variables/cluster_variable_defaulting_test.go +++ b/internal/topology/variables/cluster_variable_defaulting_test.go @@ -1271,7 +1271,7 @@ func Test_DefaultClusterVariable(t *testing.T) { func Test_getAllVariables(t *testing.T) { g := NewWithT(t) - t.Run("Expect values to be correctly consolidated in allVariables", func(t *testing.T) { + t.Run("Expect values to be correctly consolidated in allVariables", func(*testing.T) { expectedValues := []clusterv1.ClusterVariable{ // var1 has a value with no DefinitionFrom set and only one definition. It should be retained as is. { diff --git a/internal/util/ssa/patch_test.go b/internal/util/ssa/patch_test.go index b9d97c3d8334..e7b89e1cef67 100644 --- a/internal/util/ssa/patch_test.go +++ b/internal/util/ssa/patch_test.go @@ -38,7 +38,7 @@ func TestPatch(t *testing.T) { ns, err := env.CreateNamespace(ctx, "ssa") g.Expect(err).ToNot(HaveOccurred()) - t.Run("Test patch with unstructured", func(t *testing.T) { + t.Run("Test patch with unstructured", func(*testing.T) { // Build the test object to work with. initialObject := builder.TestInfrastructureCluster(ns.Name, "obj1").WithSpecFields(map[string]interface{}{ "spec.controlPlaneEndpoint.host": "1.2.3.4", @@ -88,7 +88,7 @@ func TestPatch(t *testing.T) { g.Expect(ssaCache.Has(requestIdentifier)).To(BeTrue()) }) - t.Run("Test patch with Machine", func(t *testing.T) { + t.Run("Test patch with Machine", func(*testing.T) { // Build the test object to work with. initialObject := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ diff --git a/internal/webhooks/cluster_test.go b/internal/webhooks/cluster_test.go index 663d09be99c8..d83392437e3c 100644 --- a/internal/webhooks/cluster_test.go +++ b/internal/webhooks/cluster_test.go @@ -1573,7 +1573,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. if tt.classReconciled { conditions.MarkTrue(tt.class, clusterv1.ClusterClassVariablesReconciledCondition) @@ -1996,7 +1996,7 @@ func TestClusterTopologyValidationForTopologyClassChange(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. conditions.MarkTrue(tt.firstClass, clusterv1.ClusterClassVariablesReconciledCondition) conditions.MarkTrue(tt.secondClass, clusterv1.ClusterClassVariablesReconciledCondition) @@ -2121,7 +2121,7 @@ func TestMovingBetweenManagedAndUnmanaged(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { // Mark this condition to true so the webhook sees the ClusterClass as up to date. conditions.MarkTrue(tt.clusterClass, clusterv1.ClusterClassVariablesReconciledCondition) // Sets up the fakeClient for the test case. 
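Nearly every hunk in this diff is the same mechanical rewrite, and it leans on one Go language rule: a function literal's parameters may be named `_` or left unnamed entirely when the body never reads them, which is what golangci-lint's unused-parameter checks flag. A minimal, self-contained sketch of the three forms (the walk helper and its values are illustrative, not from this PR):

package main

import "fmt"

// walk hands every element to visit; callers that ignore an argument
// can blank its name with _ or drop the name altogether.
func walk(xs []string, visit func(int, string)) {
	for i, x := range xs {
		visit(i, x)
	}
}

func main() {
	walk([]string{"a", "b"}, func(i int, s string) { fmt.Println(i, s) })  // uses both parameters
	walk([]string{"a", "b"}, func(_ int, s string) { fmt.Println(s) })     // ignores the index
	walk([]string{"a", "b"}, func(int, string) { fmt.Println("visited") }) // ignores both: types only
}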
@@ -2261,7 +2261,7 @@ func TestClusterClassPollingErrors(t *testing.T) { oldCluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").WithTopology(topology).Build(), clusterClasses: []*clusterv1.ClusterClass{ccFullyReconciled, secondFullyReconciled}, injectedErr: interceptor.Funcs{ - Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error { // Throw an error if the second ClusterClass `class2` used as the new ClusterClass is being retrieved. if key.Name == secondTopology.Class { return errors.New("connection error") @@ -2278,7 +2278,7 @@ func TestClusterClassPollingErrors(t *testing.T) { oldCluster: builder.Cluster(metav1.NamespaceDefault, "cluster1").WithTopology(topology).Build(), clusterClasses: []*clusterv1.ClusterClass{ccFullyReconciled, secondFullyReconciled}, injectedErr: interceptor.Funcs{ - Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error { // Throw an error if the ClusterClass `class1` used as the old ClusterClass is being retrieved. if key.Name == topology.Class { return errors.New("connection error") @@ -2292,7 +2292,7 @@ func TestClusterClassPollingErrors(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { // Sets up a reconcile with a fakeClient for the test case. objs := []client.Object{} for _, cc := range tt.clusterClasses { diff --git a/test/e2e/scale.go b/test/e2e/scale.go index dbac69dc0155..7e0d3b9d6983 100644 --- a/test/e2e/scale.go +++ b/test/e2e/scale.go @@ -703,7 +703,7 @@ func deleteClusterAndWaitWorker(ctx context.Context, inputChan <-chan string, re type clusterUpgrader func(ctx context.Context, namespace, clusterName string, clusterTemplateYAML []byte) func getClusterUpgradeAndWaitFn(input framework.UpgradeClusterTopologyAndWaitForUpgradeInput) clusterUpgrader { - return func(ctx context.Context, namespace, clusterName string, clusterTemplateYAML []byte) { + return func(ctx context.Context, namespace, clusterName string, _ []byte) { resources := getClusterResourcesForUpgrade(ctx, input.ClusterProxy.GetClient(), namespace, clusterName) // Nb. We cannot directly modify and use `input` in this closure function because this function // will be called multiple times and this closure will keep modifying the same `input` multiple diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index b2da6b4e2825..6f71cd9aee72 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -84,7 +84,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // By using WalkTemplates it is possible to implement patches using typed API objects, which makes code // easier to read and less error prone than using unstructured or working with raw json/yaml. // IMPORTANT: by unit testing this func/nested func properly, it is possible to prevent unexpected rollouts when patches are modified. 
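That comment is the rationale for the callback shape rewritten below: WalkTemplates decodes each template into a typed API object before invoking the mutating function, so a patch can be written as a plain type switch. A minimal sketch of such a callback, mirroring the blanked parameters from the walker_test.go hunk earlier; the patch logic and package name are hypothetical:

package patches

import (
	"context"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/runtime"

	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
)

// mutate matches the callback signature WalkTemplates expects. The
// decoded object arrives typed, so a type switch selects the templates
// this patch cares about; parameters the body never reads are unnamed.
func mutate(_ context.Context, obj runtime.Object, _ map[string]apiextensionsv1.JSON, _ runtimehooksv1.HolderReference) error {
	switch obj := obj.(type) {
	case *controlplanev1.KubeadmControlPlaneTemplate:
		if obj.Annotations == nil {
			obj.Annotations = map[string]string{}
		}
		obj.Annotations["example"] = "patched" // hypothetical patch
	}
	return nil
}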
- topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error { + topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, _ runtimehooksv1.HolderReference) error { log := ctrl.LoggerFrom(ctx) switch obj := obj.(type) { diff --git a/test/extension/handlers/topologymutation/handler_test.go b/test/extension/handlers/topologymutation/handler_test.go index e843adbfbd9c..9af73565c421 100644 --- a/test/extension/handlers/topologymutation/handler_test.go +++ b/test/extension/handlers/topologymutation/handler_test.go @@ -85,7 +85,7 @@ func Test_patchDockerClusterTemplate(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { err := patchDockerClusterTemplate(context.Background(), tt.template, tt.variables) if tt.expectedErr { g.Expect(err).To(HaveOccurred()) @@ -182,7 +182,7 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { err := patchKubeadmControlPlaneTemplate(context.Background(), tt.template, tt.variables) if tt.expectedErr { g.Expect(err).To(HaveOccurred()) @@ -276,7 +276,7 @@ func Test_patchKubeadmConfigTemplate(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { err := patchKubeadmConfigTemplate(context.Background(), tt.template, tt.variables) if tt.expectedErr { g.Expect(err).To(HaveOccurred()) @@ -327,7 +327,7 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { err := patchDockerMachineTemplate(context.Background(), tt.template, tt.variables) if tt.expectedErr { g.Expect(err).To(HaveOccurred()) @@ -430,7 +430,7 @@ func TestHandler_GeneratePatches(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { response := &runtimehooksv1.GeneratePatchesResponse{} request := &runtimehooksv1.GeneratePatchesRequest{Items: tt.requestItems} h.GeneratePatches(context.Background(), request, response) diff --git a/test/framework/cluster_proxy.go b/test/framework/cluster_proxy.go index b52943f790c7..5784452d9180 100644 --- a/test/framework/cluster_proxy.go +++ b/test/framework/cluster_proxy.go @@ -200,7 +200,7 @@ func (p *clusterProxy) GetClient() client.Client { var c client.Client var newClientErr error - err := wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), retryableOperationInterval, retryableOperationTimeout, true, func(context.Context) (bool, error) { c, newClientErr = client.New(config, client.Options{Scheme: p.scheme}) if newClientErr != nil { return false, nil //nolint:nilerr diff --git a/test/framework/namespace_helpers.go b/test/framework/namespace_helpers.go index 2da532ef9769..4597763b8ccc 100644 --- a/test/framework/namespace_helpers.go +++ b/test/framework/namespace_helpers.go @@ -166,7 +166,7 @@ func WatchNamespaceEvents(ctx context.Context, input WatchNamespaceEventsInput) _, _ = fmt.Fprintf(f, "[Updated Event] %s\n\tresource: %s/%s/%s\n\treason: %s\n\tmessage: %s\n\tfull: %#v\n", 
klog.KObj(e), e.InvolvedObject.APIVersion, e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Reason, e.Message, e) }, - DeleteFunc: func(obj interface{}) {}, + DeleteFunc: func(interface{}) {}, }) Expect(err).ToNot(HaveOccurred()) diff --git a/test/framework/suite_helpers.go b/test/framework/suite_helpers.go index 4db07dc7f8c0..6f47177fab77 100644 --- a/test/framework/suite_helpers.go +++ b/test/framework/suite_helpers.go @@ -33,7 +33,7 @@ func GatherJUnitReports(srcDir string, destDir string) error { return err } - return filepath.Walk(srcDir, func(p string, info os.FileInfo, err error) error { + return filepath.Walk(srcDir, func(p string, info os.FileInfo, _ error) error { if info.IsDir() && p != srcDir { return filepath.SkipDir } diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go index 73d70380f714..7e4726246641 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller_phases.go @@ -333,7 +333,7 @@ func (r *DockerMachinePoolReconciler) propagateMachineDeleteAnnotation(ctx conte // orderByDeleteMachineAnnotation will sort DockerMachines with the clusterv1.DeleteMachineAnnotation to the front of the list. // It will preserve the existing order of the list otherwise so that it respects the existing delete priority otherwise. func orderByDeleteMachineAnnotation(machines []infrav1.DockerMachine) []infrav1.DockerMachine { - sort.SliceStable(machines, func(i, j int) bool { + sort.SliceStable(machines, func(i, _ int) bool { _, iHasAnnotation := machines[i].Annotations[clusterv1.DeleteMachineAnnotation] return iHasAnnotation diff --git a/test/infrastructure/docker/internal/docker/util.go b/test/infrastructure/docker/internal/docker/util.go index f8fc4cb8e4cc..9fe5121e48de 100644 --- a/test/infrastructure/docker/internal/docker/util.go +++ b/test/infrastructure/docker/internal/docker/util.go @@ -87,7 +87,7 @@ func getContainer(ctx context.Context, filters container.FilterBuilder) (*types.Node, error) {
// https://docs.docker.com/engine/reference/commandline/ps/#filtering func List(ctx context.Context, filters container.FilterBuilder) ([]*types.Node, error) { res := []*types.Node{} - visit := func(ctx context.Context, cluster string, node *types.Node) { + visit := func(_ context.Context, _ string, node *types.Node) { res = append(res, node) } return res, list(ctx, visit, filters) diff --git a/test/infrastructure/docker/internal/loadbalancer/config_test.go b/test/infrastructure/docker/internal/loadbalancer/config_test.go index 460b6d9150b1..f398d3cc6b13 100644 --- a/test/infrastructure/docker/internal/loadbalancer/config_test.go +++ b/test/infrastructure/docker/internal/loadbalancer/config_test.go @@ -183,7 +183,7 @@ backend rke2-servers } for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { config, err := Config(tc.data, tc.configTemplate) g.Expect(err).NotTo(HaveOccurred()) g.Expect(config).To(Equal(tc.expectedConfig)) diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go index 6c63ae164772..de9aa0d933af 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go @@ -281,7 +281,7 @@ func TestReconcileNormalEtcd(t *testing.T) { }, } - t.Run("no-op for worker machines", func(t *testing.T) { + t.Run("no-op for worker machines", func(*testing.T) { // TODO: implement test }) @@ -488,7 +488,7 @@ func TestReconcileNormalApiServer(t *testing.T) { }, } - t.Run("no-op for worker machines", func(t *testing.T) { + t.Run("no-op for worker machines", func(*testing.T) { // TODO: implement test }) diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go index 24416549b5c1..ac9bbc87d460 100644 --- a/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go +++ b/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go @@ -66,7 +66,7 @@ func Test_cache_client(t *testing.T) { g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) }) - t.Run("fails if unknown kind", func(t *testing.T) { + t.Run("fails if unknown kind", func(*testing.T) { // TODO implement test case }) @@ -226,7 +226,7 @@ func Test_cache_client(t *testing.T) { g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) }) - t.Run("fails if unknown kind", func(t *testing.T) { + t.Run("fails if unknown kind", func(*testing.T) { // TODO implement test case }) @@ -298,7 +298,7 @@ func Test_cache_client(t *testing.T) { g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) }) - t.Run("fails if unknown kind", func(t *testing.T) { + t.Run("fails if unknown kind", func(*testing.T) { // TODO implement test case }) @@ -364,7 +364,7 @@ func Test_cache_client(t *testing.T) { g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) }) - t.Run("fails if unknown kind", func(t *testing.T) { + t.Run("fails if unknown kind", func(*testing.T) { // TODO implement test case }) @@ -583,7 +583,7 @@ func Test_cache_client(t *testing.T) { g.Expect(apierrors.IsBadRequest(err)).To(BeTrue()) }) - t.Run("fails if unknown kind", func(t *testing.T) { + t.Run("fails if unknown kind", func(*testing.T) { // TODO implement test case }) diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/gc.go b/test/infrastructure/inmemory/pkg/runtime/cache/gc.go index 85c134a117ba..0a7c5b1c51de 100644 --- 
a/test/infrastructure/inmemory/pkg/runtime/cache/gc.go +++ b/test/infrastructure/inmemory/pkg/runtime/cache/gc.go @@ -64,7 +64,7 @@ func (c *cache) startGarbageCollector(ctx context.Context) error { wg.Wait() }() - if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(context.Context) (done bool, err error) { if atomic.LoadInt64(&workers) < int64(c.garbageCollectorConcurrency) { return false, nil } diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/sync.go b/test/infrastructure/inmemory/pkg/runtime/cache/sync.go index 1caa13e9f4df..5057e599d070 100644 --- a/test/infrastructure/inmemory/pkg/runtime/cache/sync.go +++ b/test/infrastructure/inmemory/pkg/runtime/cache/sync.go @@ -81,7 +81,7 @@ func (c *cache) startSyncer(ctx context.Context) error { wg.Wait() }() - if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(context.Context) (done bool, err error) { if !syncLoopStarted { return false, nil } @@ -90,7 +90,7 @@ func (c *cache) startSyncer(ctx context.Context) error { return fmt.Errorf("failed to start sync loop: %v", err) } - if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, false, func(context.Context) (done bool, err error) { if atomic.LoadInt64(&workers) < int64(c.syncConcurrency) { return false, nil } diff --git a/test/infrastructure/inmemory/pkg/server/api/handler.go b/test/infrastructure/inmemory/pkg/server/api/handler.go index 8f5636dc916b..e0a2db0e671e 100644 --- a/test/infrastructure/inmemory/pkg/server/api/handler.go +++ b/test/infrastructure/inmemory/pkg/server/api/handler.go @@ -606,7 +606,7 @@ func (h *apiServerHandler) apiV1PortForward(req *restful.Request, resp *restful. streamChan, podName, podNamespace, - func(ctx context.Context, podName, podNamespace, _ string, stream io.ReadWriteCloser) error { + func(ctx context.Context, _, _, _ string, stream io.ReadWriteCloser) error { // Given that in the in-memory provider there is no real infrastructure, and thus no real workload cluster, // we are going to forward all the connection back to the same server (the CAPIM controller pod). return h.doPortForward(ctx, req.Request.Host, stream) diff --git a/test/infrastructure/inmemory/pkg/server/api/portforward/httpstreams.go b/test/infrastructure/inmemory/pkg/server/api/portforward/httpstreams.go index 2ded9e663ed6..e3034f10b3a3 100644 --- a/test/infrastructure/inmemory/pkg/server/api/portforward/httpstreams.go +++ b/test/infrastructure/inmemory/pkg/server/api/portforward/httpstreams.go @@ -34,7 +34,7 @@ import ( // HTTPStreamReceived is the httpstream.NewStreamHandler for port // forward streams. Each valid stream is sent to the streams channel. 
func HTTPStreamReceived(streamsCh chan httpstream.Stream) func(httpstream.Stream, <-chan struct{}) error { - return func(stream httpstream.Stream, replySent <-chan struct{}) error { + return func(stream httpstream.Stream, _ <-chan struct{}) error { // make sure it has a valid stream type header streamType := stream.Headers().Get(corev1.StreamType) if streamType == "" { diff --git a/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go b/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go index 113dc1f1bd62..12fa4b05cbac 100644 --- a/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go +++ b/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go @@ -43,7 +43,7 @@ func Test_etcd_scalingflow(t *testing.T) { g := NewWithT(t) ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{":authority": "etcd-1"})) manager := inmemoryruntime.NewManager(scheme) - resourceGroupResolver := func(host string) (string, error) { return "group1", nil } + resourceGroupResolver := func(string) (string, error) { return "group1", nil } c := &clusterServerServer{ baseServer: &baseServer{ log: log.FromContext(ctx), @@ -99,7 +99,7 @@ func Test_etcd_scalingflow(t *testing.T) { var etcdMemberToRemove uint64 = 2 var etcdMemberToBeLeader uint64 = 3 - t.Run("move leader and remove etcd member", func(t *testing.T) { + t.Run("move leader and remove etcd member", func(*testing.T) { _, err := m.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: etcdMemberToBeLeader}) g.Expect(err).NotTo(HaveOccurred()) diff --git a/test/infrastructure/inmemory/pkg/server/mux.go b/test/infrastructure/inmemory/pkg/server/mux.go index 661ea852a998..8d47d273af81 100644 --- a/test/infrastructure/inmemory/pkg/server/mux.go +++ b/test/infrastructure/inmemory/pkg/server/mux.go @@ -451,7 +451,7 @@ func (m *WorkloadClustersMux) AddAPIServer(wclName, podName string, caCert *x509 // Wait until the sever is working. var pollErr error - err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Millisecond, 1*time.Second, true, func(ctx context.Context) (done bool, err error) { + err = wait.PollUntilContextTimeout(context.TODO(), 10*time.Millisecond, 1*time.Second, true, func(context.Context) (done bool, err error) { d := &net.Dialer{Timeout: 50 * time.Millisecond} conn, err := tls.DialWithDialer(d, "tcp", wcl.HostPort(), &tls.Config{ InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own port. 
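The wait.PollUntilContextTimeout rewrites above (goproxy.go, cluster_proxy.go, gc.go, sync.go, mux.go) all follow from the same rule: the condition argument is a func(context.Context) (bool, error), and when the body never reads the context, the parameter can be declared by type alone. A runnable sketch with a made-up readiness signal standing in for the real checks:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ready := make(chan struct{})
	go func() {
		time.Sleep(30 * time.Millisecond)
		close(ready)
	}()

	// The poll condition ignores its context, so the parameter is
	// unnamed, mirroring the rewrites in this diff.
	err := wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, time.Second, true,
		func(context.Context) (bool, error) {
			select {
			case <-ready:
				return true, nil
			default:
				return false, nil
			}
		})
	fmt.Println("poll finished:", err)
}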
diff --git a/util/annotations/helpers_test.go b/util/annotations/helpers_test.go index de973cfb9c14..b0991d6e192e 100644 --- a/util/annotations/helpers_test.go +++ b/util/annotations/helpers_test.go @@ -178,7 +178,7 @@ func TestAddAnnotations(t *testing.T) { } for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { res := AddAnnotations(tc.obj, tc.input) g.Expect(res).To(Equal(tc.changed)) g.Expect(tc.obj.GetAnnotations()).To(Equal(tc.expected)) diff --git a/util/certs/certs_test.go b/util/certs/certs_test.go index edf211a5c9d8..88071c139158 100644 --- a/util/certs/certs_test.go +++ b/util/certs/certs_test.go @@ -133,7 +133,7 @@ func TestDecodeCertPEM(t *testing.T) { for _, tc := range cases { g := NewWithT(t) - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { _, err := DecodeCertPEM(tc.key) if tc.expectError { g.Expect(err).To(HaveOccurred()) diff --git a/util/container/image_test.go b/util/container/image_test.go index cb8aecdd1213..7fbc27f59294 100644 --- a/util/container/image_test.go +++ b/util/container/image_test.go @@ -140,7 +140,7 @@ func TestParseImageName(t *testing.T) { for _, tc := range testCases { g := NewWithT(t) - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { image, err := ImageFromString(tc.input) if tc.wantError { g.Expect(err).To(HaveOccurred()) @@ -211,7 +211,7 @@ func TestModifyImageRepository(t *testing.T) { for _, tc := range testCases { g := NewWithT(t) - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { res, err := ModifyImageRepository(tc.image, tc.repo) if tc.wantError { g.Expect(err).To(HaveOccurred()) @@ -226,14 +226,14 @@ func TestModifyImageRepository(t *testing.T) { func TestModifyImageTag(t *testing.T) { g := NewWithT(t) - t.Run("should ensure image is a docker compatible tag", func(t *testing.T) { + t.Run("should ensure image is a docker compatible tag", func(*testing.T) { testTag := "v1.17.4+build1" image := "example.com/image:1.17.3" res, err := ModifyImageTag(image, testTag) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res).To(Equal("example.com/image:v1.17.4_build1")) }) - t.Run("should ensure image is a docker compatible tag with docker.io", func(t *testing.T) { + t.Run("should ensure image is a docker compatible tag with docker.io", func(*testing.T) { testTag := "v1.17.4+build1" image := "docker.io/dev/image:1.17.3" res, err := ModifyImageTag(image, testTag) diff --git a/util/conversion/conversion_test.go b/util/conversion/conversion_test.go index 0917612211c9..0cb5e15e8b1f 100644 --- a/util/conversion/conversion_test.go +++ b/util/conversion/conversion_test.go @@ -38,7 +38,7 @@ var ( func TestMarshalData(t *testing.T) { g := NewWithT(t) - t.Run("should write source object to destination", func(t *testing.T) { + t.Run("should write source object to destination", func(*testing.T) { version := "v1.16.4" providerID := "aws://some-id" src := &clusterv1.Machine{ @@ -71,7 +71,7 @@ func TestMarshalData(t *testing.T) { g.Expect(dst.GetAnnotations()[DataAnnotation]).ToNot(ContainSubstring("label1")) }) - t.Run("should append the annotation", func(t *testing.T) { + t.Run("should append the annotation", func(*testing.T) { src := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", @@ -92,7 +92,7 @@ func TestMarshalData(t *testing.T) { func TestUnmarshalData(t *testing.T) { g := NewWithT(t) - t.Run("should return false without errors if annotation doesn't exist", func(t *testing.T) { + t.Run("should return false without errors if 
annotation doesn't exist", func(*testing.T) { src := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-1", @@ -107,7 +107,7 @@ func TestUnmarshalData(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) }) - t.Run("should return true when a valid annotation with data exists", func(t *testing.T) { + t.Run("should return true when a valid annotation with data exists", func(*testing.T) { src := &unstructured.Unstructured{} src.SetGroupVersionKind(oldMachineGVK) src.SetName("test-1") @@ -131,7 +131,7 @@ func TestUnmarshalData(t *testing.T) { g.Expect(dst.GetAnnotations()).To(BeEmpty()) }) - t.Run("should clean the annotation on successful unmarshal", func(t *testing.T) { + t.Run("should clean the annotation on successful unmarshal", func(*testing.T) { src := &unstructured.Unstructured{} src.SetGroupVersionKind(oldMachineGVK) src.SetName("test-1") diff --git a/util/labels/format/helpers_test.go b/util/labels/format/helpers_test.go index df1d379996cf..f44e3f35b16c 100644 --- a/util/labels/format/helpers_test.go +++ b/util/labels/format/helpers_test.go @@ -41,7 +41,7 @@ func TestNameLabelValue(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { got := MustFormatValue(tt.machineSetName) g.Expect(got).To(gomega.Equal(tt.want)) }) @@ -82,7 +82,7 @@ func TestMustMatchLabelValueForName(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { got := MustEqualValue(tt.machineSetName, tt.labelValue) g.Expect(got).To(gomega.Equal(tt.want)) }) diff --git a/util/labels/helpers_test.go b/util/labels/helpers_test.go index 00af2f5abb1b..e2ff0bd0ebbc 100644 --- a/util/labels/helpers_test.go +++ b/util/labels/helpers_test.go @@ -78,7 +78,7 @@ func TestHasWatchLabel(t *testing.T) { } for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { res := HasWatchLabel(tc.obj, tc.input) g.Expect(res).To(Equal(tc.expected)) }) diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index 1fdfa678f56e..92986f8c88b7 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -53,9 +53,9 @@ func ClusterCreateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Cluster infrastructure is not ready, blocking further processing") return false }, - UpdateFunc: func(e event.UpdateEvent) bool { return false }, - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - GenericFunc: func(e event.GenericEvent) bool { return false }, + UpdateFunc: func(event.UpdateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } @@ -82,9 +82,9 @@ func ClusterCreateNotPaused(logger logr.Logger) predicate.Funcs { log.V(4).Info("Cluster is paused, blocking further processing") return false }, - UpdateFunc: func(e event.UpdateEvent) bool { return false }, - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - GenericFunc: func(e event.GenericEvent) bool { return false }, + UpdateFunc: func(event.UpdateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } @@ -112,9 +112,9 @@ func ClusterUpdateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Cluster infrastructure did not become ready, blocking further processing") return false }, - CreateFunc: 
func(e event.CreateEvent) bool { return false }, - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - GenericFunc: func(e event.GenericEvent) bool { return false }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } @@ -144,9 +144,9 @@ func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { log.V(6).Info("Cluster was not unpaused, blocking further processing") return false }, - CreateFunc: func(e event.CreateEvent) bool { return false }, - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - GenericFunc: func(e event.GenericEvent) bool { return false }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } @@ -200,9 +200,9 @@ func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { log.V(6).Info("Cluster ControlPlaneInitialized hasn't changed, blocking further processing") return false }, - CreateFunc: func(e event.CreateEvent) bool { return false }, - DeleteFunc: func(e event.DeleteEvent) bool { return false }, - GenericFunc: func(e event.GenericEvent) bool { return false }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } diff --git a/util/predicates/cluster_predicates_test.go b/util/predicates/cluster_predicates_test.go index ddc37ab58b65..253c1cbcc0e6 100644 --- a/util/predicates/cluster_predicates_test.go +++ b/util/predicates/cluster_predicates_test.go @@ -87,7 +87,7 @@ func TestClusterControlplaneInitializedPredicate(t *testing.T) { for i := range testcases { tc := testcases[i] - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { ev := event.UpdateEvent{ ObjectOld: &tc.oldCluster, ObjectNew: &tc.newCluster, diff --git a/util/util_test.go b/util/util_test.go index 7d36aac8b2a3..f37e60284ebd 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -101,7 +101,7 @@ func TestMachineToInfrastructureMapFunc(t *testing.T) { } for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { fn := MachineToInfrastructureMapFunc(tc.input) out := fn(ctx, tc.request) g.Expect(out).To(BeComparableTo(tc.output)) @@ -316,7 +316,7 @@ func TestHasOwner(t *testing.T) { } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, func(*testing.T) { result := HasOwner( test.refList, clusterv1.GroupVersion.String(), @@ -433,7 +433,7 @@ func TestIsOwnedByObject(t *testing.T) { } for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(test.name, func(*testing.T) { pointer := &metav1.ObjectMeta{ OwnerReferences: test.refs, } @@ -542,7 +542,7 @@ func TestGetOwnerMachineSuccessByNameFromDifferentVersion(t *testing.T) { func TestIsExternalManagedControlPlane(t *testing.T) { g := NewWithT(t) - t.Run("should return true if control plane status externalManagedControlPlane is true", func(t *testing.T) { + t.Run("should return true if control plane status externalManagedControlPlane is true", func(*testing.T) { controlPlane := &unstructured.Unstructured{ Object: map[string]interface{}{ "status": map[string]interface{}{ @@ -554,7 +554,7 @@ func TestIsExternalManagedControlPlane(t *testing.T) { g.Expect(result).Should(BeTrue()) }) - t.Run("should return 
false if control plane status externalManagedControlPlane is false", func(t *testing.T) { + t.Run("should return false if control plane status externalManagedControlPlane is false", func(*testing.T) { controlPlane := &unstructured.Unstructured{ Object: map[string]interface{}{ "status": map[string]interface{}{ @@ -566,7 +566,7 @@ func TestIsExternalManagedControlPlane(t *testing.T) { g.Expect(result).Should(BeFalse()) }) - t.Run("should return false if control plane status externalManagedControlPlane is not set", func(t *testing.T) { + t.Run("should return false if control plane status externalManagedControlPlane is not set", func(*testing.T) { controlPlane := &unstructured.Unstructured{ Object: map[string]interface{}{ "status": map[string]interface{}{ @@ -582,7 +582,7 @@ func TestIsExternalManagedControlPlane(t *testing.T) { func TestEnsureOwnerRef(t *testing.T) { g := NewWithT(t) - t.Run("should set ownerRef on an empty list", func(t *testing.T) { + t.Run("should set ownerRef on an empty list", func(*testing.T) { obj := &clusterv1.Machine{} ref := metav1.OwnerReference{ APIVersion: clusterv1.GroupVersion.String(), @@ -593,7 +593,7 @@ func TestEnsureOwnerRef(t *testing.T) { g.Expect(obj.OwnerReferences).Should(ContainElement(ref)) }) - t.Run("should not duplicate owner references", func(t *testing.T) { + t.Run("should not duplicate owner references", func(*testing.T) { obj := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ OwnerReferences: []metav1.OwnerReference{ @@ -615,7 +615,7 @@ func TestEnsureOwnerRef(t *testing.T) { g.Expect(obj.OwnerReferences).Should(HaveLen(1)) }) - t.Run("should update the APIVersion if duplicate", func(t *testing.T) { + t.Run("should update the APIVersion if duplicate", func(*testing.T) { oldgvk := schema.GroupVersion{ Group: clusterv1.GroupVersion.Group, Version: "v1alpha2", @@ -877,7 +877,7 @@ func TestRemoveOwnerRef(t *testing.T) { }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(*testing.T) { // Use a fresh ownerRefs slice for each test, because RemoveOwnerRef may modify the underlying array. 
ownerRefs := makeOwnerRefs() ownerRefs = RemoveOwnerRef(ownerRefs, tt.toBeRemoved) diff --git a/util/version/version_test.go b/util/version/version_test.go index 416ce61c23f2..b8dae745ceb6 100644 --- a/util/version/version_test.go +++ b/util/version/version_test.go @@ -68,7 +68,7 @@ func TestParseMajorMinorPatch(t *testing.T) { } for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { out, err := ParseMajorMinorPatch(tc.input) g.Expect(err != nil).To(Equal(tc.expectError)) g.Expect(out).To(BeComparableTo(tc.output)) @@ -116,7 +116,7 @@ func TestParseMajorMinorPatchTolerant(t *testing.T) { } for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { + t.Run(tc.name, func(*testing.T) { out, err := ParseMajorMinorPatchTolerant(tc.input) g.Expect(err != nil).To(Equal(tc.expectError)) g.Expect(out).To(BeComparableTo(tc.output)) diff --git a/util/yaml/yaml_test.go b/util/yaml/yaml_test.go index a09acdae321e..4d8ce47444bc 100644 --- a/util/yaml/yaml_test.go +++ b/util/yaml/yaml_test.go @@ -211,7 +211,7 @@ func TestParseClusterYaml(t *testing.T) { }, } for _, testcase := range testcases { - t.Run(testcase.name, func(t *testing.T) { + t.Run(testcase.name, func(*testing.T) { file, err := createTempFile(testcase.contents) g.Expect(err).ToNot(HaveOccurred()) defer os.Remove(file) @@ -285,7 +285,7 @@ func TestParseMachineYaml(t *testing.T) { }, } for _, testcase := range testcases { - t.Run(testcase.name, func(t *testing.T) { + t.Run(testcase.name, func(*testing.T) { file, err := createTempFile(testcase.contents) g.Expect(err).ToNot(HaveOccurred()) defer os.Remove(file)
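One non-mechanical detail worth calling out from the util_test.go hunk above: the comment about RemoveOwnerRef possibly modifying the underlying array describes a general property of Go slices rather than anything specific to that helper. A tiny demonstration of why each test case needs a fresh slice; the values are illustrative:

package main

import "fmt"

func main() {
	base := []string{"a", "b", "c"}
	// Deleting element 1 by re-slicing and appending writes through the
	// shared backing array, so base is visibly mutated as well.
	out := append(base[:1], base[2:]...)
	fmt.Println(out)  // [a c]
	fmt.Println(base) // [a c c]
}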