From b30d12b227f549d2bb8dccc86a8a7bf4a0b215f9 Mon Sep 17 00:00:00 2001 From: Troy Connor Date: Fri, 8 Dec 2023 15:19:13 -0500 Subject: [PATCH] remove k8s.io/utils/pointer from repository Signed-off-by: Troy Connor --- api/v1alpha4/cluster_types.go | 4 +- api/v1alpha4/conversion_test.go | 16 +-- api/v1beta1/cluster_types.go | 4 +- api/v1beta1/index/machine.go | 4 +- api/v1beta1/index/machine_test.go | 6 +- .../internal/cloudinit/cloudinit_test.go | 6 +- .../controllers/kubeadmconfig_controller.go | 8 +- .../kubeadmconfig_controller_test.go | 14 +- .../kubeadm/internal/ignition/clc/clc_test.go | 80 +++++------ .../internal/webhooks/kubeadmconfig_test.go | 20 +-- .../webhooks/kubeadmconfigtemplate_test.go | 4 +- bootstrap/util/configowner_test.go | 16 +-- .../client/alpha/rollout_rollbacker_test.go | 6 +- cmd/clusterctl/client/cluster/mover_test.go | 6 +- cmd/clusterctl/client/cluster/upgrader.go | 4 +- cmd/clusterctl/client/config.go | 6 +- cmd/clusterctl/client/config_test.go | 38 +++--- .../repository/repository_github_test.go | 4 +- cmd/clusterctl/cmd/util.go | 24 ++-- cmd/clusterctl/internal/test/fake_objects.go | 4 +- cmd/clusterctl/internal/test/fake_proxy.go | 4 +- cmd/clusterctl/log/logger_test.go | 6 +- .../internal/controllers/controller.go | 6 +- .../internal/controllers/controller_test.go | 24 ++-- .../internal/controllers/helpers_test.go | 22 +-- .../internal/controllers/remediation_test.go | 72 +++++----- .../internal/controllers/status_test.go | 4 +- .../internal/controllers/upgrade_test.go | 6 +- .../webhooks/kubeadm_control_plane_test.go | 36 ++--- .../kubeadm/internal/webhooks/scale_test.go | 6 +- .../machinepool_controller_phases.go | 8 +- .../machinepool_controller_phases_test.go | 34 ++--- .../machinepool_controller_test.go | 32 ++--- exp/internal/webhooks/machinepool.go | 6 +- exp/internal/webhooks/machinepool_test.go | 12 +- exp/ipam/internal/webhooks/ipaddress_test.go | 4 +- .../internal/webhooks/ipaddressclaim_test.go | 4 +- 
.../extensionconfig_controller_test.go | 4 +- go.mod | 2 +- go.sum | 4 +- hack/tools/go.mod | 2 +- hack/tools/go.sum | 4 +- hack/tools/internal/tilt-prepare/main.go | 28 ++-- internal/contract/controlplane.go | 4 +- .../cluster/cluster_controller_phases.go | 6 +- .../cluster/cluster_controller_test.go | 8 +- .../clusterclass_controller_test.go | 14 +- .../machine_controller_noderef_test.go | 4 +- .../machine/machine_controller_phases.go | 12 +- .../machine/machine_controller_phases_test.go | 6 +- .../machine/machine_controller_test.go | 58 ++++---- .../machinedeployment_controller_test.go | 30 ++-- .../machinedeployment_rolling_test.go | 44 +++--- .../machinedeployment_sync.go | 6 +- .../machinedeployment_sync_test.go | 62 ++++----- .../machinedeployment/mdutil/util_test.go | 24 ++-- .../machinedeployment/suite_test.go | 4 +- .../machinehealthcheck_controller_test.go | 10 +- .../machineset/machineset_controller_test.go | 12 +- .../machineset/machineset_preflight.go | 12 +- .../machineset/machineset_preflight_test.go | 30 ++-- internal/controllers/machineset/suite_test.go | 4 +- .../topology/cluster/desired_state.go | 6 +- .../topology/cluster/desired_state_test.go | 30 ++-- .../topology/cluster/patches/engine_test.go | 46 +++---- .../external/external_patch_generator_test.go | 6 +- .../inline/json_patch_generator_test.go | 128 +++++++++--------- .../cluster/patches/variables/value.go | 4 +- .../cluster/patches/variables/value_test.go | 4 +- .../cluster/patches/variables/variables.go | 6 +- .../patches/variables/variables_test.go | 30 ++-- .../topology/cluster/reconcile_state_test.go | 32 ++--- .../topology/cluster/scope/blueprint_test.go | 18 +-- .../serversidepathhelper_test.go | 4 +- internal/runtime/client/client.go | 4 +- internal/runtime/client/client_test.go | 36 ++--- internal/runtime/registry/registry_test.go | 6 +- internal/test/builder/crds.go | 6 +- .../cluster_variable_validation_test.go | 30 ++-- .../clusterclass_variable_validation_test.go | 68 
+++++----- internal/topology/variables/schema.go | 4 +- internal/topology/variables/schema_test.go | 54 ++++---- internal/util/ssa/patch_test.go | 6 +- internal/webhooks/cluster_test.go | 14 +- internal/webhooks/clusterclass_test.go | 28 ++-- internal/webhooks/machine_test.go | 8 +- internal/webhooks/machinedeployment.go | 10 +- internal/webhooks/machinedeployment_test.go | 22 +-- internal/webhooks/machineset.go | 4 +- internal/webhooks/machineset_test.go | 14 +- internal/webhooks/patch_validation_test.go | 58 ++++---- .../runtime/extensionconfig_webhook.go | 4 +- .../runtime/extensionconfig_webhook_test.go | 24 ++-- test/e2e/autoscaler.go | 6 +- test/e2e/autoscaler_test.go | 6 +- test/e2e/cluster_upgrade.go | 12 +- test/e2e/cluster_upgrade_runtimesdk.go | 8 +- test/e2e/cluster_upgrade_runtimesdk_test.go | 6 +- test/e2e/cluster_upgrade_test.go | 30 ++-- test/e2e/clusterclass_changes.go | 6 +- test/e2e/clusterclass_changes_test.go | 4 +- test/e2e/clusterclass_rollout.go | 12 +- test/e2e/clusterclass_rollout_test.go | 4 +- test/e2e/clusterctl_upgrade.go | 10 +- test/e2e/clusterctl_upgrade_test.go | 12 +- test/e2e/k8s_conformance.go | 6 +- test/e2e/k8s_conformance_test.go | 4 +- test/e2e/kcp_adoption.go | 8 +- test/e2e/kcp_adoption_test.go | 4 +- test/e2e/kcp_remediations.go | 10 +- test/e2e/kcp_remediations_test.go | 4 +- test/e2e/machine_pool.go | 8 +- test/e2e/machine_pool_test.go | 4 +- test/e2e/md_remediations.go | 8 +- test/e2e/md_remediations_test.go | 4 +- test/e2e/md_rollout.go | 6 +- test/e2e/md_rollout_test.go | 4 +- test/e2e/md_scale.go | 8 +- test/e2e/md_scale_test.go | 4 +- test/e2e/node_drain_timeout.go | 10 +- test/e2e/node_drain_timeout_test.go | 4 +- test/e2e/quick_start.go | 6 +- test/e2e/quick_start_test.go | 30 ++-- test/e2e/scale.go | 14 +- test/e2e/scale_test.go | 30 ++-- test/e2e/self_hosted.go | 6 +- test/e2e/self_hosted_test.go | 22 +-- test/framework/autoscaler_helpers.go | 6 +- test/framework/clusterctl/e2e_config.go | 6 +- 
test/framework/controlplane_helpers.go | 6 +- test/framework/deployment_helpers.go | 4 +- test/framework/machinedeployment_helpers.go | 12 +- test/framework/ownerreference_helpers.go | 16 +-- test/go.mod | 2 +- test/go.sum | 4 +- test/infrastructure/container/docker.go | 4 +- .../dockermachinepool_controller.go | 4 +- .../dockermachinetemplate_webhook_test.go | 14 +- .../controllers/inmemorymachine_controller.go | 4 +- util/collections/machine_collection_test.go | 26 ++-- util/collections/machine_filters_test.go | 38 +++--- util/failuredomains/failure_domains.go | 4 +- util/failuredomains/failure_domains_test.go | 10 +- util/patch/patch_test.go | 6 +- util/topology/topology_test.go | 12 +- 145 files changed, 1083 insertions(+), 1083 deletions(-) diff --git a/api/v1alpha4/cluster_types.go b/api/v1alpha4/cluster_types.go index d4ee0658e5e6..019329d7c62e 100644 --- a/api/v1alpha4/cluster_types.go +++ b/api/v1alpha4/cluster_types.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" capierrors "sigs.k8s.io/cluster-api/errors" ) @@ -422,7 +422,7 @@ func (in FailureDomains) FilterControlPlane() FailureDomains { func (in FailureDomains) GetIDs() []*string { ids := make([]*string, 0, len(in)) for id := range in { - ids = append(ids, pointer.String(id)) + ids = append(ids, ptr.To(id)) } return ids } diff --git a/api/v1alpha4/conversion_test.go b/api/v1alpha4/conversion_test.go index 54685c415836..ce34632007d9 100644 --- a/api/v1alpha4/conversion_test.go +++ b/api/v1alpha4/conversion_test.go @@ -24,7 +24,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" @@ 
-113,17 +113,17 @@ func JSONSchemaPropsFuzzer(in *clusterv1.JSONSchemaProps, c fuzz.Continue) { for i := 0; i < c.Intn(10); i++ { in.Required = append(in.Required, c.RandString()) } - in.MaxItems = pointer.Int64(c.Int63()) - in.MinItems = pointer.Int64(c.Int63()) + in.MaxItems = ptr.To(c.Int63()) + in.MinItems = ptr.To(c.Int63()) in.UniqueItems = c.RandBool() in.Format = c.RandString() - in.MaxLength = pointer.Int64(c.Int63()) - in.MinLength = pointer.Int64(c.Int63()) + in.MaxLength = ptr.To(c.Int63()) + in.MinLength = ptr.To(c.Int63()) in.Pattern = c.RandString() - in.Maximum = pointer.Int64(c.Int63()) - in.Maximum = pointer.Int64(c.Int63()) + in.Maximum = ptr.To(c.Int63()) + in.Maximum = ptr.To(c.Int63()) in.ExclusiveMaximum = c.RandBool() - in.Minimum = pointer.Int64(c.Int63()) + in.Minimum = ptr.To(c.Int63()) in.ExclusiveMinimum = c.RandBool() // Not every random byte array is valid JSON, e.g. a string without `""`,so we're setting valid values. diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go index 456d83622393..12c258f22a0a 100644 --- a/api/v1beta1/cluster_types.go +++ b/api/v1beta1/cluster_types.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" capierrors "sigs.k8s.io/cluster-api/errors" ) @@ -622,7 +622,7 @@ func (in FailureDomains) FilterControlPlane() FailureDomains { func (in FailureDomains) GetIDs() []*string { ids := make([]*string, 0, len(in)) for id := range in { - ids = append(ids, pointer.String(id)) + ids = append(ids, ptr.To(id)) } return ids } diff --git a/api/v1beta1/index/machine.go b/api/v1beta1/index/machine.go index ea9bebb931d3..28d6fce5352f 100644 --- a/api/v1beta1/index/machine.go +++ b/api/v1beta1/index/machine.go @@ -21,7 +21,7 @@ import ( "fmt" "github.com/pkg/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl 
"sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,7 +81,7 @@ func machineByProviderID(o client.Object) []string { panic(fmt.Sprintf("Expected a Machine but got a %T", o)) } - providerID := pointer.StringDeref(machine.Spec.ProviderID, "") + providerID := ptr.Deref(machine.Spec.ProviderID, "") if providerID == "" { return nil diff --git a/api/v1beta1/index/machine_test.go b/api/v1beta1/index/machine_test.go index e96519e537c2..2aa0061503f2 100644 --- a/api/v1beta1/index/machine_test.go +++ b/api/v1beta1/index/machine_test.go @@ -21,7 +21,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -77,7 +77,7 @@ func TestIndexMachineByProviderID(t *testing.T) { name: "Machine has invalid providerID", object: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - ProviderID: pointer.String(""), + ProviderID: ptr.To(""), }, }, expected: nil, @@ -86,7 +86,7 @@ func TestIndexMachineByProviderID(t *testing.T) { name: "Machine has valid providerID", object: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - ProviderID: pointer.String(validProviderID), + ProviderID: ptr.To(validProviderID), }, }, expected: []string{validProviderID}, diff --git a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go index 8ea39e3f6fd0..626d2479888d 100644 --- a/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go +++ b/bootstrap/kubeadm/internal/cloudinit/cloudinit_test.go @@ -20,7 +20,7 @@ import ( "testing" . 
"github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util/certs" @@ -141,8 +141,8 @@ func TestNewInitControlPlaneDiskMounts(t *testing.T) { { Device: "test-device", Layout: true, - Overwrite: pointer.Bool(false), - TableType: pointer.String("gpt"), + Overwrite: ptr.To(false), + TableType: ptr.To("gpt"), }, }, Filesystems: []bootstrapv1.Filesystem{ diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 80a4da1475df..c4c0d5a279f7 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -1026,7 +1026,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope Kind: "KubeadmConfig", Name: scope.Config.Name, UID: scope.Config.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -1048,7 +1048,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name) } } - scope.Config.Status.DataSecretName = pointer.String(secret.Name) + scope.Config.Status.DataSecretName = ptr.To(secret.Name) scope.Config.Status.Ready = true conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) return nil @@ -1077,7 +1077,7 @@ func (r *KubeadmConfigReconciler) ensureBootstrapSecretOwnersRef(ctx context.Con Kind: "KubeadmConfig", UID: scope.Config.UID, Name: scope.Config.Name, - 
Controller: pointer.Bool(true), + Controller: ptr.To(true), })) err = patchHelper.Patch(ctx, secret) if err != nil { diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go index 8c193f169cb9..a7690421a155 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go @@ -31,7 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" bootstrapapi "k8s.io/cluster-bootstrap/token/api" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -138,7 +138,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi WithClusterName(clusterName). WithBootstrapTemplate(bootstrapbuilder.KubeadmConfig(metav1.NamespaceDefault, "cfg").Unstructured()). Build() - machine.Spec.Bootstrap.DataSecretName = pointer.String("something") + machine.Spec.Bootstrap.DataSecretName = ptr.To("something") config := newKubeadmConfig(metav1.NamespaceDefault, "cfg") config.SetOwnerReferences(util.EnsureOwnerRef(config.GetOwnerReferences(), metav1.OwnerReference{ @@ -211,7 +211,7 @@ func TestKubeadmConfigReconciler_TestSecretOwnerReferenceReconciliation(t *testi Kind: machine.Kind, Name: machine.Name, UID: machine.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }}) g.Expect(myclient.Update(ctx, actual)).To(Succeed()) @@ -269,7 +269,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName WithClusterName("cluster1"). WithBootstrapTemplate(bootstrapbuilder.KubeadmConfig(metav1.NamespaceDefault, "cfg").Unstructured()). 
Build() - machine.Spec.Bootstrap.DataSecretName = pointer.String("something") + machine.Spec.Bootstrap.DataSecretName = ptr.To("something") config := newKubeadmConfig(metav1.NamespaceDefault, "cfg") addKubeadmConfigToMachine(config, machine) @@ -975,7 +975,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { Kind: "KubeadmConfig", Name: workerJoinConfig.Name, UID: workerJoinConfig.UID, - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -1617,7 +1617,7 @@ func TestKubeadmConfigReconciler_Reconcile_DynamicDefaultsForClusterConfiguratio }, machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("otherVersion"), + Version: ptr.To("otherVersion"), }, }, }, @@ -1643,7 +1643,7 @@ func TestKubeadmConfigReconciler_Reconcile_DynamicDefaultsForClusterConfiguratio }, machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("myversion"), + Version: ptr.To("myversion"), }, }, }, diff --git a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go index 25b2ffe8be75..23ae5c53d7f7 100644 --- a/bootstrap/kubeadm/internal/ignition/clc/clc_test.go +++ b/bootstrap/kubeadm/internal/ignition/clc/clc_test.go @@ -23,7 +23,7 @@ import ( ignition "github.com/flatcar/ignition/config/v2_3" "github.com/flatcar/ignition/config/v2_3/types" "github.com/google/go-cmp/cmp" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/cloudinit" @@ -77,7 +77,7 @@ func TestRender(t *testing.T) { PostKubeadmCommands: postKubeadmCommands, KubeadmCommand: "kubeadm join", NTP: &bootstrapv1.NTP{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Servers: []string{ "foo.bar", "baz", @@ -86,13 +86,13 @@ func TestRender(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - Gecos: pointer.String("Foo B. 
Bar"), - Groups: pointer.String("foo, bar"), - HomeDir: pointer.String("/home/foo"), - Shell: pointer.String("/bin/false"), - Passwd: pointer.String("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), - PrimaryGroup: pointer.String("foo"), - Sudo: pointer.String("ALL=(ALL) NOPASSWD:ALL"), + Gecos: ptr.To("Foo B. Bar"), + Groups: ptr.To("foo, bar"), + HomeDir: ptr.To("/home/foo"), + Shell: ptr.To("/bin/false"), + Passwd: ptr.To("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), + PrimaryGroup: ptr.To("foo"), + Sudo: ptr.To("ALL=(ALL) NOPASSWD:ALL"), SSHAuthorizedKeys: []string{ "foo", "bar", @@ -104,8 +104,8 @@ func TestRender(t *testing.T) { { Device: "/dev/disk/azure/scsi1/lun0", Layout: true, - Overwrite: pointer.Bool(true), - TableType: pointer.String("gpt"), + Overwrite: ptr.To(true), + TableType: ptr.To("gpt"), }, }, Filesystems: []bootstrapv1.Filesystem{ @@ -114,7 +114,7 @@ func TestRender(t *testing.T) { Filesystem: "ext4", Label: "test_disk", ExtraOpts: []string{"-F", "-E", "lazy_itable_init=1,lazy_journal_init=1"}, - Overwrite: pointer.Bool(true), + Overwrite: ptr.To(true), }, }, }, @@ -147,7 +147,7 @@ func TestRender(t *testing.T) { }, HomeDir: "/home/foo", Name: "foo", - PasswordHash: pointer.String("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), + PasswordHash: ptr.To("$6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/"), PrimaryGroup: "foo", SSHAuthorizedKeys: []types.SSHAuthorizedKey{ "foo", @@ -175,7 +175,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,foo%20ALL%3D(ALL)%20NOPASSWD%3AALL%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -189,7 +189,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,foo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ 
-201,7 +201,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -213,7 +213,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -225,7 +225,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23%20Common%20pool%0Aserver%20foo.bar%0Aserver%20baz%0A%0A%23%20Warning%3A%20Using%20default%20NTP%20settings%20will%20leave%20your%20NTP%0A%23%20server%20accessible%20to%20all%20hosts%20on%20the%20Internet.%0A%0A%23%20If%20you%20want%20to%20deny%20all%20machines%20(including%20your%20own)%0A%23%20from%20accessing%20the%20NTP%20server%2C%20uncomment%3A%0A%23restrict%20default%20ignore%0A%0A%23%20Default%20configuration%3A%0A%23%20-%20Allow%20only%20time%20queries%2C%20at%20a%20limited%20rate%2C%20sending%20KoD%20when%20in%20excess.%0A%23%20-%20Allow%20all%20local%20queries%20(IPv4%2C%20IPv6)%0Arestrict%20default%20nomodify%20nopeer%20noquery%20notrap%20limited%20kod%0Arestrict%20127.0.0.1%0Arestrict%20%5B%3A%3A1%5D%0A", }, - Mode: pointer.Int(420), + Mode: ptr.To(420), }, }, }, @@ -234,7 +234,7 @@ func TestRender(t *testing.T) { Mount: &types.Mount{ Device: "/dev/disk/azure/scsi1/lun0", Format: "ext4", - Label: pointer.String("test_disk"), + Label: ptr.To("test_disk"), Options: []types.MountOption{ "-F", "-E", @@ -250,16 +250,16 @@ func TestRender(t 
*testing.T) { Units: []types.Unit{ { Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "kubeadm.service", }, { - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "ntpd.service", }, { Contents: "[Unit]\nDescription = Mount test_disk\n\n[Mount]\nWhat=/dev/disk/azure/scsi1/lun0\nWhere=/var/lib/testdir\nOptions=foo\n\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "var-lib-testdir.mount", }, }, @@ -275,11 +275,11 @@ func TestRender(t *testing.T) { Users: []bootstrapv1.User{ { Name: "foo", - LockPassword: pointer.Bool(false), + LockPassword: ptr.To(false), }, { Name: "bar", - LockPassword: pointer.Bool(false), + LockPassword: ptr.To(false), }, }, }, @@ -308,7 +308,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23%20Use%20most%20defaults%20for%20sshd%20configuration.%0ASubsystem%20sftp%20internal-sftp%0AClientAliveInterval%20180%0AUseDNS%20no%0AUsePAM%20yes%0APrintLastLog%20no%20%23%20handled%20by%20PAM%0APrintMotd%20no%20%23%20handled%20by%20PAM%0A%0AMatch%20User%20foo%2Cbar%0A%20%20PasswordAuthentication%20yes%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -320,7 +320,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: 
"data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -332,7 +332,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, }, @@ -341,7 +341,7 @@ func TestRender(t *testing.T) { Units: []types.Unit{ { Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "kubeadm.service", }, }, @@ -381,7 +381,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,foo%0A"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -391,7 +391,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,foo%0A"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -403,7 +403,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: 
"data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -415,7 +415,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, }, @@ -424,7 +424,7 @@ func TestRender(t *testing.T) { Units: []types.Unit{ { Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "kubeadm.service", }, }, @@ -479,7 +479,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -492,7 +492,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -505,7 +505,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, { @@ -518,7 +518,7 @@ func TestRender(t *testing.T) { }, FileEmbedded1: types.FileEmbedded1{ Contents: types.FileContents{Source: "data:,"}, - Mode: pointer.Int(384), + Mode: 
ptr.To(384), }, }, { @@ -530,7 +530,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,%23!%2Fbin%2Fbash%0Aset%20-e%0A%0Apre-command%0Aanother-pre-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A%0A%0Akubeadm%20join%0Amkdir%20-p%20%2Frun%2Fcluster-api%20%26%26%20echo%20success%20%3E%20%2Frun%2Fcluster-api%2Fbootstrap-success.complete%0Amv%20%2Fetc%2Fkubeadm.yml%20%2Ftmp%2F%0A%0Apost-kubeadm-command%0Aanother-post-kubeamd-command%0Acat%20%3C%3CEOF%20%3E%20%2Fetc%2Fmodules-load.d%2Fcontainerd.conf%0Aoverlay%0Abr_netfilter%0AEOF%0A", }, - Mode: pointer.Int(448), + Mode: ptr.To(448), }, }, { @@ -542,7 +542,7 @@ func TestRender(t *testing.T) { Contents: types.FileContents{ Source: "data:,---%0Afoo%0A", }, - Mode: pointer.Int(384), + Mode: ptr.To(384), }, }, }, @@ -551,7 +551,7 @@ func TestRender(t *testing.T) { Units: []types.Unit{ { Contents: "[Unit]\nDescription=kubeadm\n# Run only once. After successful run, this file is moved to /tmp/.\nConditionPathExists=/etc/kubeadm.yml\nAfter=network.target\n[Service]\n# To not restart the unit when it exits, as it is expected.\nType=oneshot\nExecStart=/etc/kubeadm.sh\n[Install]\nWantedBy=multi-user.target\n", - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), Name: "kubeadm.service", }, }, diff --git a/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go index 17d9c7dd3fa6..bdb6fa0eee75 100644 --- a/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfig_test.go @@ -22,7 +22,7 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -44,7 +44,7 @@ func TestKubeadmConfigDefault(t *testing.T) { Spec: bootstrapv1.KubeadmConfigSpec{}, } updateDefaultingKubeadmConfig := kubeadmConfig.DeepCopy() - updateDefaultingKubeadmConfig.Spec.Verbosity = pointer.Int32(4) + updateDefaultingKubeadmConfig.Spec.Verbosity = ptr.To[int32](4) webhook := &KubeadmConfig{} t.Run("for KubeadmConfig", util.CustomDefaultValidateTest(ctx, updateDefaultingKubeadmConfig, webhook)) @@ -192,7 +192,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Spec: bootstrapv1.KubeadmConfigSpec{ Users: []bootstrapv1.User{ { - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, @@ -228,7 +228,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Users: []bootstrapv1.User{ { PasswdFrom: &bootstrapv1.PasswdSource{}, - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, @@ -249,7 +249,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Key: "bar", }, }, - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, @@ -270,7 +270,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Name: "foo", }, }, - Passwd: pointer.String("foo"), + Passwd: ptr.To("foo"), }, }, }, @@ -313,7 +313,7 @@ func TestKubeadmConfigValidate(t *testing.T) { Format: bootstrapv1.Ignition, Users: []bootstrapv1.User{ { - Inactive: pointer.Bool(true), + Inactive: ptr.To(true), }, }, }, @@ -332,7 +332,7 @@ func TestKubeadmConfigValidate(t *testing.T) { DiskSetup: &bootstrapv1.DiskSetup{ Partitions: []bootstrapv1.Partition{ { - TableType: pointer.String("MS-DOS"), + TableType: ptr.To("MS-DOS"), }, }, }, @@ -393,7 +393,7 @@ func TestKubeadmConfigValidate(t *testing.T) { DiskSetup: &bootstrapv1.DiskSetup{ Filesystems: []bootstrapv1.Filesystem{ { - ReplaceFS: 
pointer.String("ntfs"), + ReplaceFS: ptr.To("ntfs"), }, }, }, @@ -413,7 +413,7 @@ func TestKubeadmConfigValidate(t *testing.T) { DiskSetup: &bootstrapv1.DiskSetup{ Filesystems: []bootstrapv1.Filesystem{ { - Partition: pointer.String("1"), + Partition: ptr.To("1"), }, }, }, diff --git a/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go index fefcf7658f9a..215e6a6cde84 100644 --- a/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go +++ b/bootstrap/kubeadm/internal/webhooks/kubeadmconfigtemplate_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -38,7 +38,7 @@ func TestKubeadmConfigTemplateDefault(t *testing.T) { }, } updateDefaultingKubeadmConfigTemplate := kubeadmConfigTemplate.DeepCopy() - updateDefaultingKubeadmConfigTemplate.Spec.Template.Spec.Verbosity = pointer.Int32(4) + updateDefaultingKubeadmConfigTemplate.Spec.Template.Spec.Verbosity = ptr.To[int32](4) webhook := &KubeadmConfigTemplate{} t.Run("for KubeadmConfigTemplate", util.CustomDefaultValidateTest(ctx, updateDefaultingKubeadmConfigTemplate, webhook)) diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go index 1bc9bac97cae..7c884403be4f 100644 --- a/bootstrap/util/configowner_test.go +++ b/bootstrap/util/configowner_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -52,9 +52,9 @@ func TestGetConfigOwner(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "my-cluster", Bootstrap: 
clusterv1.Bootstrap{ - DataSecretName: pointer.String("my-data-secret"), + DataSecretName: ptr.To("my-data-secret"), }, - Version: pointer.String("v1.19.6"), + Version: ptr.To("v1.19.6"), }, Status: clusterv1.MachineStatus{ InfrastructureReady: true, @@ -102,7 +102,7 @@ func TestGetConfigOwner(t *testing.T) { ClusterName: "my-cluster", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.19.6"), + Version: ptr.To("v1.19.6"), }, }, }, @@ -254,7 +254,7 @@ func TestHasNodeRefs(t *testing.T) { Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, }, { @@ -267,7 +267,7 @@ func TestHasNodeRefs(t *testing.T) { Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: expv1.MachinePoolStatus{ NodeRefs: []corev1.ObjectReference{ @@ -326,7 +326,7 @@ func TestHasNodeRefs(t *testing.T) { Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: expv1.MachinePoolStatus{ NodeRefs: []corev1.ObjectReference{ @@ -353,7 +353,7 @@ func TestHasNodeRefs(t *testing.T) { Name: "machine-pool-name", }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, }, } diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go index 9ddf27de008f..c8024600e06c 100644 --- a/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go +++ b/cmd/clusterctl/client/alpha/rollout_rollbacker_test.go @@ -23,7 +23,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -71,7 +71,7 @@ func Test_ObjectRollbacker(t *testing.T) { Name: "md-template", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name"), + DataSecretName: ptr.To("data-secret-name"), }, }, }, @@ -150,7 +150,7 @@ func Test_ObjectRollbacker(t *testing.T) { Name: "md-template-rollback", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name-rollback"), + DataSecretName: ptr.To("data-secret-name-rollback"), }, }, }, diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 96fe02ee63b2..edc3077143db 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -2015,7 +2015,7 @@ func Test_createTargetObject(t *testing.T) { APIVersion: "cluster.x-k8s.io/v1beta1", }, }: { - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -2033,7 +2033,7 @@ func Test_createTargetObject(t *testing.T) { } g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred()) g.Expect(c.OwnerReferences).To(HaveLen(1)) - g.Expect(c.OwnerReferences[0].Controller).To(Equal(pointer.Bool(true))) + g.Expect(c.OwnerReferences[0].Controller).To(Equal(ptr.To(true))) }, }, { diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go index 8aaf2cde6e2f..ee45791f2839 100644 --- a/cmd/clusterctl/client/cluster/upgrader.go +++ b/cmd/clusterctl/client/cluster/upgrader.go @@ -26,7 
+26,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -496,7 +496,7 @@ func scaleDownDeployment(ctx context.Context, c client.Client, deploy appsv1.Dep } // Scale down. - deployment.Spec.Replicas = pointer.Int32(0) + deployment.Spec.Replicas = ptr.To[int32](0) if err := c.Update(ctx, deployment); err != nil { return errors.Wrapf(err, "failed to update Deployment/%s", deploy.GetName()) } diff --git a/cmd/clusterctl/client/config.go b/cmd/clusterctl/client/config.go index 03e3354bfde2..cf915fa0d230 100644 --- a/cmd/clusterctl/client/config.go +++ b/cmd/clusterctl/client/config.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -404,7 +404,7 @@ func (c *clusterctlClient) templateOptionsToVariables(options GetClusterTemplate if options.ControlPlaneMachineCount == nil { // Check if set through env variable and default to 1 otherwise if v, err := c.configClient.Variables().Get("CONTROL_PLANE_MACHINE_COUNT"); err != nil { - options.ControlPlaneMachineCount = pointer.Int64(1) + options.ControlPlaneMachineCount = ptr.To[int64](1) } else { i, err := strconv.ParseInt(v, 10, 64) if err != nil { @@ -422,7 +422,7 @@ func (c *clusterctlClient) templateOptionsToVariables(options GetClusterTemplate if options.WorkerMachineCount == nil { // Check if set through env variable and default to 0 otherwise if v, err := c.configClient.Variables().Get("WORKER_MACHINE_COUNT"); err != nil { - options.WorkerMachineCount = pointer.Int64(0) + options.WorkerMachineCount = ptr.To[int64](0) } else { i, err := strconv.ParseInt(v, 10, 64) if err != nil { diff --git 
a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index 599cfa1c4985..14adb313eff3 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -28,7 +28,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" @@ -306,8 +306,8 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "v1.2.3", - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(2), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](2), }, }, wantVars: map[string]string{ @@ -326,8 +326,8 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "", // empty means to use value from env variables/config file - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(2), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](2), }, }, wantVars: map[string]string{ @@ -404,7 +404,7 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "v1.2.3", - ControlPlaneMachineCount: pointer.Int64(-1), + ControlPlaneMachineCount: ptr.To[int64](-1), }, }, wantErr: true, @@ -416,8 +416,8 @@ func Test_clusterctlClient_templateOptionsToVariables(t *testing.T) { ClusterName: "foo", TargetNamespace: "bar", KubernetesVersion: "v1.2.3", - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(-1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](-1), }, }, wantErr: true, @@ -564,7 +564,7 @@ func Test_clusterctlClient_GetClusterTemplate(t 
*testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -584,7 +584,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -604,7 +604,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "", // empty triggers usage of the current namespace - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -623,7 +623,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -644,7 +644,7 @@ func Test_clusterctlClient_GetClusterTemplate(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -782,7 +782,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -802,7 +802,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, wantErr: true, @@ -817,7 +817,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: 
templateValues{ @@ -838,7 +838,7 @@ func Test_clusterctlClient_GetClusterTemplate_onEmptyCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -935,7 +935,7 @@ func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, want: templateValues{ @@ -955,7 +955,7 @@ func Test_clusterctlClient_GetClusterTemplate_withoutCluster(t *testing.T) { }, ClusterName: "test", TargetNamespace: "ns1", - ControlPlaneMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), }, }, wantErr: true, diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go index cf4781e43135..6f9210dfde6f 100644 --- a/cmd/clusterctl/client/repository/repository_github_test.go +++ b/cmd/clusterctl/client/repository/repository_github_test.go @@ -27,7 +27,7 @@ import ( "github.com/google/go-github/v53/github" . 
"github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -732,7 +732,7 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) { args: args{ tag: "foo", }, - wantTagName: pointer.String("v0.4.1"), + wantTagName: ptr.To("v0.4.1"), wantErr: false, }, { diff --git a/cmd/clusterctl/cmd/util.go b/cmd/clusterctl/cmd/util.go index 2ab45cbb653c..59e1d50e9535 100644 --- a/cmd/clusterctl/cmd/util.go +++ b/cmd/clusterctl/cmd/util.go @@ -27,7 +27,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/cmd/clusterctl/client" ) @@ -74,42 +74,42 @@ func printVariablesOutput(template client.Template, options client.GetClusterTem switch name { case "CLUSTER_NAME": // Cluster name from the cmd arguments is used instead of template default. - variableMap[name] = pointer.String(options.ClusterName) + variableMap[name] = ptr.To(options.ClusterName) case "NAMESPACE": // Namespace name from the cmd flags or from the kubeconfig is used instead of template default. if options.TargetNamespace != "" { - variableMap[name] = pointer.String(options.TargetNamespace) + variableMap[name] = ptr.To(options.TargetNamespace) } else { - variableMap[name] = pointer.String("current Namespace in the KubeConfig file") + variableMap[name] = ptr.To("current Namespace in the KubeConfig file") } case "CONTROL_PLANE_MACHINE_COUNT": // Control plane machine count uses the cmd flag, env variable or a constant is used instead of template default. 
if options.ControlPlaneMachineCount == nil { if val, ok := os.LookupEnv("CONTROL_PLANE_MACHINE_COUNT"); ok { - variableMap[name] = pointer.String(val) + variableMap[name] = ptr.To(val) } else { - variableMap[name] = pointer.String("1") + variableMap[name] = ptr.To("1") } } else { - variableMap[name] = pointer.String(strconv.FormatInt(*options.ControlPlaneMachineCount, 10)) + variableMap[name] = ptr.To(strconv.FormatInt(*options.ControlPlaneMachineCount, 10)) } case "WORKER_MACHINE_COUNT": // Worker machine count uses the cmd flag, env variable or a constant is used instead of template default. if options.WorkerMachineCount == nil { if val, ok := os.LookupEnv("WORKER_MACHINE_COUNT"); ok { - variableMap[name] = pointer.String(val) + variableMap[name] = ptr.To(val) } else { - variableMap[name] = pointer.String("0") + variableMap[name] = ptr.To("0") } } else { - variableMap[name] = pointer.String(strconv.FormatInt(*options.WorkerMachineCount, 10)) + variableMap[name] = ptr.To(strconv.FormatInt(*options.WorkerMachineCount, 10)) } case "KUBERNETES_VERSION": // Kubernetes version uses the cmd flag, env variable, or the template default. 
if options.KubernetesVersion != "" { - variableMap[name] = pointer.String(options.KubernetesVersion) + variableMap[name] = ptr.To(options.KubernetesVersion) } else if val, ok := os.LookupEnv("KUBERNETES_VERSION"); ok { - variableMap[name] = pointer.String(val) + variableMap[name] = ptr.To(val) } } diff --git a/cmd/clusterctl/internal/test/fake_objects.go b/cmd/clusterctl/internal/test/fake_objects.go index a4c1aac65e3e..0299d67dd9c9 100644 --- a/cmd/clusterctl/internal/test/fake_objects.go +++ b/cmd/clusterctl/internal/test/fake_objects.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -555,7 +555,7 @@ func NewFakeInfrastructureTemplate(name string) *fakeinfrastructure.GenericInfra // - the DataSecretName contains the name of the static data secret. func NewStaticBootstrapConfig(name string) *clusterv1.Bootstrap { return &clusterv1.Bootstrap{ - DataSecretName: pointer.String(name + "-bootstrap-secret"), + DataSecretName: ptr.To(name + "-bootstrap-secret"), } } diff --git a/cmd/clusterctl/internal/test/fake_proxy.go b/cmd/clusterctl/internal/test/fake_proxy.go index c710d0ff5e75..3e421fa0e619 100644 --- a/cmd/clusterctl/internal/test/fake_proxy.go +++ b/cmd/clusterctl/internal/test/fake_proxy.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -203,7 +203,7 @@ func (f *FakeProxy) WithFakeCAPISetup() *FakeProxy { } func (f *FakeProxy) WithClusterAvailable(available bool) *FakeProxy { - f.available = pointer.Bool(available) + f.available = ptr.To(available) return f } diff --git 
a/cmd/clusterctl/log/logger_test.go b/cmd/clusterctl/log/logger_test.go index b5e0dee6e6b4..d1d0b31de162 100644 --- a/cmd/clusterctl/log/logger_test.go +++ b/cmd/clusterctl/log/logger_test.go @@ -21,7 +21,7 @@ import ( . "github.com/onsi/gomega" "github.com/pkg/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestFlatten(t *testing.T) { @@ -115,7 +115,7 @@ func TestLoggerEnabled(t *testing.T) { }{ { name: "Return true when level is set below the threshold", - threshold: pointer.Int(5), + threshold: ptr.To(5), level: 1, want: true, }, @@ -126,7 +126,7 @@ func TestLoggerEnabled(t *testing.T) { }, { name: "Return false when level is set above the threshold", - threshold: pointer.Int(5), + threshold: ptr.To(5), level: 7, want: false, }, diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 1bf2625f61a1..94f8a62cf551 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -30,7 +30,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -929,8 +929,8 @@ func (r *KubeadmControlPlaneReconciler) adoptOwnedSecrets(ctx context.Context, k Kind: "KubeadmControlPlane", Name: kcp.Name, UID: kcp.UID, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), })) if err := r.Client.Update(ctx, ss); err != nil { diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 988ce54bea50..5975bf8dc5c4 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ 
b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -38,7 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -212,7 +212,7 @@ func TestReconcileUpdateObservedGeneration(t *testing.T) { }, 10*time.Second).Should(Equal(generation)) // triggers a generation change by changing the spec - kcp.Spec.Replicas = pointer.Int32(*kcp.Spec.Replicas + 2) + kcp.Spec.Replicas = ptr.To[int32](*kcp.Spec.Replicas + 2) g.Expect(env.Update(ctx, kcp)).To(Succeed()) // read kcp.Generation after the update @@ -754,7 +754,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Kind: "KubeadmConfig", }, }, - Version: pointer.String("v1.15.0"), + Version: ptr.To("v1.15.0"), }, }, }, @@ -880,7 +880,7 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { Kind: "OtherController", Name: "name", UID: "uid", - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }) @@ -930,8 +930,8 @@ func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) { Kind: "OtherController", Name: kcp.Name, UID: kcp.UID, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, )) @@ -1497,7 +1497,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed()) // Existing Machine to validate in-place mutation - fd := pointer.String("fd1") + fd := ptr.To("fd1") inPlaceMutatingMachine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", @@ -1523,9 +1523,9 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { ConfigRef: bootstrapRef, }, InfrastructureRef: *infraMachineRef, - Version: 
pointer.String("v1.25.3"), + Version: ptr.To("v1.25.3"), FailureDomain: fd, - ProviderID: pointer.String("provider-id"), + ProviderID: ptr.To("provider-id"), NodeDrainTimeout: duration5s, NodeVolumeDetachTimeout: duration5s, NodeDeletionTimeout: duration5s, @@ -1553,7 +1553,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Namespace: namespace.Name, }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("machine-bootstrap-secret"), + DataSecretName: ptr.To("machine-bootstrap-secret"), }, }, } @@ -1587,7 +1587,7 @@ func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) { Namespace: namespace.Name, }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("machine-bootstrap-secret"), + DataSecretName: ptr.To("machine-bootstrap-secret"), }, }, } @@ -2355,7 +2355,7 @@ func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *contr APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", }, }, - Replicas: pointer.Int32(int32(3)), + Replicas: ptr.To[int32](int32(3)), Version: "v1.16.6", RolloutStrategy: &controlplanev1.RolloutStrategy{ Type: "RollingUpdate", diff --git a/controlplane/kubeadm/internal/controllers/helpers_test.go b/controlplane/kubeadm/internal/controllers/helpers_test.go index 1449618efa8b..f2110c07a76f 100644 --- a/controlplane/kubeadm/internal/controllers/helpers_test.go +++ b/controlplane/kubeadm/internal/controllers/helpers_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -525,7 +525,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { t.Run("should return the correct Machine object when creating a new Machine", func(t *testing.T) { g := NewWithT(t) - failureDomain := pointer.String("fd1") + failureDomain := ptr.To("fd1") 
createdMachine, err := (&KubeadmControlPlaneReconciler{}).computeDesiredMachine( kcp, cluster, infraRef, bootstrapRef, @@ -535,7 +535,7 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { expectedMachineSpec := clusterv1.MachineSpec{ ClusterName: cluster.Name, - Version: pointer.String(kcp.Spec.Version), + Version: ptr.To(kcp.Spec.Version), Bootstrap: clusterv1.Bootstrap{ ConfigRef: bootstrapRef, }, @@ -583,8 +583,8 @@ func TestKubeadmControlPlaneReconciler_computeDesiredMachine(t *testing.T) { // to verify that for an existing machine we do not override this information. existingClusterConfigurationString := "existing-cluster-configuration-information" remediationData := "remediation-data" - failureDomain := pointer.String("fd-1") - machineVersion := pointer.String("v1.25.3") + failureDomain := ptr.To("fd-1") + machineVersion := ptr.To("v1.25.3") existingMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: machineName, @@ -712,8 +712,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { UID: "5", Kind: "OtherController", APIVersion: clusterv1.GroupVersion.String(), - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), } // A KubeadmConfig secret created by CAPI controllers with no owner references. 
@@ -761,8 +761,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { UID: kcp.UID, Kind: kcp.Kind, APIVersion: kcp.APIVersion, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, { @@ -773,8 +773,8 @@ func TestKubeadmControlPlaneReconciler_adoptKubeconfigSecret(t *testing.T) { UID: kcp.UID, Kind: kcp.Kind, APIVersion: kcp.APIVersion, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), }, }, { diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index adb77bc0f22d..ae4902f50b00 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" - utilpointer "k8s.io/utils/pointer" + utilptr "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -161,10 +161,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), }, }, }, @@ -212,10 +212,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - 
MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), }, }, }, @@ -269,10 +269,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), MinHealthyPeriod: &metav1.Duration{Duration: minHealthyPeriod}, }, }, @@ -325,10 +325,10 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RemediationStrategy: &controlplanev1.RemediationStrategy{ - MaxRetry: utilpointer.Int32(3), + MaxRetry: utilptr.To[int32](3), RetryPeriod: metav1.Duration{Duration: controlplanev1.DefaultMinHealthyPeriod}, // RetryPeriod not yet expired. 
}, }, @@ -376,7 +376,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ RollingUpdate: &controlplanev1.RollingUpdate{ MaxSurge: &intstr.IntOrString{ @@ -412,7 +412,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialized: true, @@ -442,7 +442,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialized: true, @@ -486,7 +486,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(5), + Replicas: utilptr.To[int32](5), }, Status: controlplanev1.KubeadmControlPlaneStatus{ Initialized: true, @@ -529,7 +529,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -578,7 +578,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + 
Replicas: utilptr.To[int32](1), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -668,7 +668,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(2), + Replicas: utilptr.To[int32](2), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -720,7 +720,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -773,7 +773,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(4), + Replicas: utilptr.To[int32](4), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -826,7 +826,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(4), + Replicas: utilptr.To[int32](4), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -869,7 +869,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -975,7 +975,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: 
controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", }, Status: controlplanev1.KubeadmControlPlaneStatus{ @@ -1077,7 +1077,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RolloutStrategy: &controlplanev1.RolloutStrategy{ RollingUpdate: &controlplanev1.RollingUpdate{ @@ -1189,7 +1189,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{ Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), Version: "v1.19.1", RolloutStrategy: &controlplanev1.RolloutStrategy{ RollingUpdate: &controlplanev1.RollingUpdate{ @@ -1273,7 +1273,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(1), + Replicas: utilptr.To[int32](1), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1), @@ -1305,7 +1305,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2), @@ -1336,7 +1336,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: 
collections.FromMachines(m1, m2), @@ -1374,7 +1374,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2), @@ -1406,7 +1406,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -1438,7 +1438,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -1477,7 +1477,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(3), + Replicas: utilptr.To[int32](3), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3), @@ -1511,7 +1511,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(5), + Replicas: utilptr.To[int32](5), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5), @@ -1545,7 +1545,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - 
Replicas: utilpointer.Int32(7), + Replicas: utilptr.To[int32](7), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5), @@ -1581,7 +1581,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(7), + Replicas: utilptr.To[int32](7), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5, m6, m7), @@ -1617,7 +1617,7 @@ func TestCanSafelyRemoveEtcdMember(t *testing.T) { controlPlane := &internal.ControlPlane{ KCP: &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{ - Replicas: utilpointer.Int32(5), + Replicas: utilptr.To[int32](5), }}, Cluster: &clusterv1.Cluster{}, Machines: collections.FromMachines(m1, m2, m3, m4, m5, m6, m7), @@ -1713,7 +1713,7 @@ func createMachine(ctx context.Context, g *WithT, namespace, name string, option Spec: clusterv1.MachineSpec{ ClusterName: "cluster", Bootstrap: clusterv1.Bootstrap{ - DataSecretName: utilpointer.String("secret"), + DataSecretName: utilptr.To("secret"), }, }, } @@ -1741,7 +1741,7 @@ func getDeletingMachine(namespace, name string, options ...machineOption) *clust Spec: clusterv1.MachineSpec{ ClusterName: "cluster", Bootstrap: clusterv1.Bootstrap{ - DataSecretName: utilpointer.String("secret"), + DataSecretName: utilptr.To("secret"), }, }, } diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 867b8172ce93..fbd75c926eb2 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/log" @@ -358,7 +358,7 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr }, Spec: controlplanev1.KubeadmControlPlaneSpec{ Version: "v1.16.6", - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{ InfrastructureRef: corev1.ObjectReference{ APIVersion: "test/v1alpha1", diff --git a/controlplane/kubeadm/internal/controllers/upgrade_test.go b/controlplane/kubeadm/internal/controllers/upgrade_test.go index a5173b6a519e..be5939f30963 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade_test.go +++ b/controlplane/kubeadm/internal/controllers/upgrade_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -75,7 +75,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { cluster.Status.InfrastructureReady = true kcp.UID = types.UID(util.RandomString(10)) kcp.Spec.KubeadmConfigSpec.ClusterConfiguration = nil - kcp.Spec.Replicas = pointer.Int32(1) + kcp.Spec.Replicas = ptr.To[int32](1) setKCPHealthy(kcp) r := &KubeadmControlPlaneReconciler{ @@ -186,7 +186,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault) cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1" cluster.Spec.ControlPlaneEndpoint.Port = 6443 - kcp.Spec.Replicas = pointer.Int32(3) + kcp.Spec.Replicas = ptr.To[int32](3) kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = 0 setKCPHealthy(kcp) diff --git a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go index cd8830e568d4..eac0bbf4200b 100644 --- 
a/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go +++ b/controlplane/kubeadm/internal/webhooks/kubeadm_control_plane_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" @@ -96,7 +96,7 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{}, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Version: "v1.19.0", RolloutStrategy: &controlplanev1.RolloutStrategy{ Type: controlplanev1.RollingUpdateStrategyType, @@ -123,10 +123,10 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { missingReplicas.Spec.Replicas = nil zeroReplicas := valid.DeepCopy() - zeroReplicas.Spec.Replicas = pointer.Int32(0) + zeroReplicas.Spec.Replicas = ptr.To[int32](0) evenReplicas := valid.DeepCopy() - evenReplicas.Spec.Replicas = pointer.Int32(2) + evenReplicas.Spec.Replicas = ptr.To[int32](2) evenReplicasExternalEtcd := evenReplicas.DeepCopy() evenReplicasExternalEtcd.Spec.KubeadmConfigSpec = bootstrapv1.KubeadmConfigSpec{ @@ -151,7 +151,7 @@ func TestKubeadmControlPlaneValidateCreate(t *testing.T) { invalidRolloutBeforeCertificateExpiryDays := valid.DeepCopy() invalidRolloutBeforeCertificateExpiryDays.Spec.RolloutBefore = &controlplanev1.RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(5), // less than minimum + CertificatesExpiryDays: ptr.To[int32](5), // less than minimum } invalidIgnitionConfiguration := valid.DeepCopy() @@ -304,7 +304,7 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second}, NodeDeletionTimeout: &metav1.Duration{Duration: time.Second}, }, - Replicas: pointer.Int32(1), + 
Replicas: ptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ Type: controlplanev1.RollingUpdateStrategyType, RollingUpdate: &controlplanev1.RollingUpdate{ @@ -363,19 +363,19 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { }, NTP: &bootstrapv1.NTP{ Servers: []string{"test-server-1", "test-server-2"}, - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, }, Version: "v1.16.6", RolloutBefore: &controlplanev1.RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(7), + CertificatesExpiryDays: ptr.To[int32](7), }, }, } updateMaxSurgeVal := before.DeepCopy() updateMaxSurgeVal.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = int32(0) - updateMaxSurgeVal.Spec.Replicas = pointer.Int32(3) + updateMaxSurgeVal.Spec.Replicas = ptr.To[int32](3) wrongReplicaCountForScaleIn := before.DeepCopy() wrongReplicaCountForScaleIn.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal = int32(0) @@ -433,24 +433,24 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { validUpdate.Spec.MachineTemplate.NodeDrainTimeout = &metav1.Duration{Duration: 10 * time.Second} validUpdate.Spec.MachineTemplate.NodeVolumeDetachTimeout = &metav1.Duration{Duration: 10 * time.Second} validUpdate.Spec.MachineTemplate.NodeDeletionTimeout = &metav1.Duration{Duration: 10 * time.Second} - validUpdate.Spec.Replicas = pointer.Int32(5) + validUpdate.Spec.Replicas = ptr.To[int32](5) now := metav1.NewTime(time.Now()) validUpdate.Spec.RolloutAfter = &now validUpdate.Spec.RolloutBefore = &controlplanev1.RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(14), + CertificatesExpiryDays: ptr.To[int32](14), } validUpdate.Spec.RemediationStrategy = &controlplanev1.RemediationStrategy{ - MaxRetry: pointer.Int32(50), + MaxRetry: ptr.To[int32](50), MinHealthyPeriod: &metav1.Duration{Duration: 10 * time.Hour}, RetryPeriod: metav1.Duration{Duration: 10 * time.Minute}, } validUpdate.Spec.KubeadmConfigSpec.Format = bootstrapv1.CloudConfig scaleToZero := before.DeepCopy() - 
scaleToZero.Spec.Replicas = pointer.Int32(0) + scaleToZero.Spec.Replicas = ptr.To[int32](0) scaleToEven := before.DeepCopy() - scaleToEven.Spec.Replicas = pointer.Int32(2) + scaleToEven.Spec.Replicas = ptr.To[int32](2) invalidNamespace := before.DeepCopy() invalidNamespace.Spec.MachineTemplate.InfrastructureRef.Namespace = invalidNamespaceName @@ -613,7 +613,7 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { }, } scaleToEvenExternalEtcdCluster := beforeExternalEtcdCluster.DeepCopy() - scaleToEvenExternalEtcdCluster.Spec.Replicas = pointer.Int32(2) + scaleToEvenExternalEtcdCluster.Spec.Replicas = ptr.To[int32](2) beforeInvalidEtcdCluster := before.DeepCopy() beforeInvalidEtcdCluster.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = bootstrapv1.Etcd{ @@ -644,11 +644,11 @@ func TestKubeadmControlPlaneValidateUpdate(t *testing.T) { updateNTPServers.Spec.KubeadmConfigSpec.NTP.Servers = []string{"new-server"} disableNTPServers := before.DeepCopy() - disableNTPServers.Spec.KubeadmConfigSpec.NTP.Enabled = pointer.Bool(false) + disableNTPServers.Spec.KubeadmConfigSpec.NTP.Enabled = ptr.To(false) invalidRolloutBeforeCertificateExpiryDays := before.DeepCopy() invalidRolloutBeforeCertificateExpiryDays.Spec.RolloutBefore = &controlplanev1.RolloutBefore{ - CertificatesExpiryDays: pointer.Int32(5), // less than minimum + CertificatesExpiryDays: ptr.To[int32](5), // less than minimum } unsetRolloutBefore := before.DeepCopy() @@ -1317,7 +1317,7 @@ func TestKubeadmControlPlaneValidateUpdateAfterDefaulting(t *testing.T) { g.Expect(tt.kcp.Spec.Version).To(Equal("v1.19.0")) g.Expect(tt.kcp.Spec.RolloutStrategy.Type).To(Equal(controlplanev1.RollingUpdateStrategyType)) g.Expect(tt.kcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) - g.Expect(tt.kcp.Spec.Replicas).To(Equal(pointer.Int32(1))) + g.Expect(tt.kcp.Spec.Replicas).To(Equal(ptr.To[int32](1))) } g.Expect(warnings).To(BeEmpty()) }) diff --git 
a/controlplane/kubeadm/internal/webhooks/scale_test.go b/controlplane/kubeadm/internal/webhooks/scale_test.go index 6d35c1402543..3cddb1a2bd89 100644 --- a/controlplane/kubeadm/internal/webhooks/scale_test.go +++ b/controlplane/kubeadm/internal/webhooks/scale_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -62,7 +62,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { }, NodeDrainTimeout: &metav1.Duration{Duration: time.Second}, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), RolloutStrategy: &controlplanev1.RolloutStrategy{ Type: controlplanev1.RollingUpdateStrategyType, RollingUpdate: &controlplanev1.RollingUpdate{ @@ -121,7 +121,7 @@ func TestKubeadmControlPlaneValidateScale(t *testing.T) { }, NTP: &bootstrapv1.NTP{ Servers: []string{"test-server-1", "test-server-2"}, - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, }, Version: "v1.16.6", diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index 93878e189135..3a769b10d727 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/storage/names" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -168,7 +168,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster * m.Status.FailureReason = &machineStatusFailure } if failureMessage != "" { - m.Status.FailureMessage = pointer.String( + m.Status.FailureMessage = ptr.To( 
fmt.Sprintf("Failure detected from referenced resource %v with name %q: %s", obj.GroupVersionKind(), obj.GetName(), failureMessage), ) @@ -225,7 +225,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster return ctrl.Result{}, errors.Errorf("retrieved empty dataSecretName from bootstrap provider for MachinePool %q in namespace %q", m.Name, m.Namespace) } - m.Spec.Template.Spec.Bootstrap.DataSecretName = pointer.String(secretName) + m.Spec.Template.Spec.Bootstrap.DataSecretName = ptr.To(secretName) m.Status.BootstrapReady = true return ctrl.Result{}, nil } @@ -254,7 +254,7 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu // Infra object went missing after the machine pool was up and running log.Error(err, "infrastructure reference has been deleted after being ready, setting failure state") mp.Status.FailureReason = capierrors.MachinePoolStatusErrorPtr(capierrors.InvalidConfigurationMachinePoolError) - mp.Status.FailureMessage = pointer.String(fmt.Sprintf("MachinePool infrastructure resource %v with name %q has been deleted after being ready", + mp.Status.FailureMessage = ptr.To(fmt.Sprintf("MachinePool infrastructure resource %v with name %q has been deleted after being ready", mp.Spec.Template.Spec.InfrastructureRef.GroupVersionKind(), mp.Spec.Template.Spec.InfrastructureRef.Name)) } conditions.MarkFalse(mp, clusterv1.InfrastructureReadyCondition, clusterv1.IncorrectExternalRefReason, clusterv1.ConditionSeverityError, fmt.Sprintf("could not find infra reference of kind %s with name %s", mp.Spec.Template.Spec.InfrastructureRef.Kind, mp.Spec.Template.Spec.InfrastructureRef.Name)) diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go index 94bb4cbd3cce..4bf6c5109c47 100644 --- a/exp/internal/controllers/machinepool_controller_phases_test.go +++ b/exp/internal/controllers/machinepool_controller_phases_test.go @@ -30,7 
+30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -70,7 +70,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { }, Spec: expv1.MachinePoolSpec{ ClusterName: defaultCluster.Name, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -373,7 +373,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.Status.ReadyReplicas = 1 // Scale up - machinepool.Spec.Replicas = pointer.Int32(5) + machinepool.Spec.Replicas = ptr.To[int32](5) r.reconcilePhase(machinepool) g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(expv1.MachinePoolPhaseScalingUp)) @@ -404,7 +404,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { err = unstructured.SetNestedField(infraConfig.Object, int64(4), "status", "replicas") g.Expect(err).ToNot(HaveOccurred()) - machinepool.Spec.Replicas = pointer.Int32(4) + machinepool.Spec.Replicas = ptr.To[int32](4) // Set NodeRef. 
machinepool.Status.NodeRefs = []corev1.ObjectReference{ @@ -428,7 +428,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.Status.ReadyReplicas = 4 // Scale down - machinepool.Spec.Replicas = pointer.Int32(1) + machinepool.Spec.Replicas = ptr.To[int32](1) r.reconcilePhase(machinepool) g.Expect(machinepool.Status.GetTypedPhase()).To(Equal(expv1.MachinePoolPhaseScalingDown)) @@ -647,7 +647,7 @@ func TestReconcileMachinePoolBootstrap(t *testing.T) { Kind: builder.TestBootstrapConfigKind, Name: "bootstrap-config1", }, - DataSecretName: pointer.String("data"), + DataSecretName: ptr.To("data"), }, }, }, @@ -686,7 +686,7 @@ func TestReconcileMachinePoolBootstrap(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data"), + DataSecretName: ptr.To("data"), }, }, }, @@ -730,7 +730,7 @@ func TestReconcileMachinePoolBootstrap(t *testing.T) { Kind: builder.TestBootstrapConfigKind, Name: "bootstrap-config1", }, - DataSecretName: pointer.String("data"), + DataSecretName: ptr.To("data"), }, }, }, @@ -784,7 +784,7 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { }, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -863,7 +863,7 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { Namespace: metav1.NamespaceDefault, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -959,7 +959,7 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { Namespace: metav1.NamespaceDefault, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: 
clusterv1.Bootstrap{ @@ -1519,7 +1519,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { // Setup prerequisites - a running MachinePool with one instance and user sets Replicas to 0 // set replicas to 0 - machinepool.Spec.Replicas = pointer.Int32(0) + machinepool.Spec.Replicas = ptr.To[int32](0) // set nodeRefs to one instance machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} @@ -1580,7 +1580,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { // Setup prerequisites - a running MachinePool with one instance and user sets Replicas to 0 // set replicas to 0 - machinepool.Spec.Replicas = pointer.Int32(0) + machinepool.Spec.Replicas = ptr.To[int32](0) // set nodeRefs to one instance machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} @@ -1620,7 +1620,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { // Setup prerequisites - a running MachinePool with no instances and replicas set to 0 // set replicas to 0 - machinepool.Spec.Replicas = pointer.Int32(0) + machinepool.Spec.Replicas = ptr.To[int32](0) // set nodeRefs to no instance machinepool.Status.NodeRefs = []corev1.ObjectReference{} @@ -1654,7 +1654,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { // Setup prerequisites - a running MachinePool with no instances and replicas set to 1 // set replicas to 1 - machinepool.Spec.Replicas = pointer.Int32(1) + machinepool.Spec.Replicas = ptr.To[int32](1) // set nodeRefs to no instance machinepool.Status.NodeRefs = []corev1.ObjectReference{} @@ -1706,7 +1706,7 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { // Setup prerequisites - a running MachinePool with no refs but providerIDList and replicas set to 1 // set replicas to 1 - machinepool.Spec.Replicas = pointer.Int32(1) + machinepool.Spec.Replicas = ptr.To[int32](1) // set nodeRefs to no instance machinepool.Status.NodeRefs = 
[]corev1.ObjectReference{} @@ -1803,7 +1803,7 @@ func getMachinePool(replicas int, mpName, clusterName, nsName string) expv1.Mach }, Spec: expv1.MachinePoolSpec{ ClusterName: clusterName, - Replicas: pointer.Int32(int32(replicas)), + Replicas: ptr.To[int32](int32(replicas)), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: clusterName, diff --git a/exp/internal/controllers/machinepool_controller_test.go b/exp/internal/controllers/machinepool_controller_test.go index bbd4a95c340e..3fad26eaf901 100644 --- a/exp/internal/controllers/machinepool_controller_test.go +++ b/exp/internal/controllers/machinepool_controller_test.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -52,7 +52,7 @@ func TestMachinePoolFinalizer(t *testing.T) { Namespace: metav1.NamespaceDefault, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -71,7 +71,7 @@ func TestMachinePoolFinalizer(t *testing.T) { Finalizers: []string{"some-other-finalizer"}, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -147,7 +147,7 @@ func TestMachinePoolOwnerReference(t *testing.T) { Namespace: metav1.NamespaceDefault, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), ClusterName: "invalid", }, } @@ -158,7 +158,7 @@ func TestMachinePoolOwnerReference(t *testing.T) { Namespace: metav1.NamespaceDefault, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + 
Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -179,7 +179,7 @@ func TestMachinePoolOwnerReference(t *testing.T) { }, }, Spec: expv1.MachinePoolSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ @@ -313,7 +313,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Spec: expv1.MachinePoolSpec{ ClusterName: "test-cluster", ProviderIDList: []string{"test://id-1"}, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ @@ -322,7 +322,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Kind: builder.TestInfrastructureMachineTemplateKind, Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, }, }, @@ -350,7 +350,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Spec: expv1.MachinePoolSpec{ ClusterName: "test-cluster", ProviderIDList: []string{"test://id-1"}, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ @@ -358,7 +358,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { Kind: builder.TestInfrastructureMachineTemplateKind, Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, }, }, @@ -389,7 +389,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { }, Spec: expv1.MachinePoolSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ @@ -397,7 +397,7 @@ func 
TestReconcileMachinePoolRequest(t *testing.T) { Kind: builder.TestInfrastructureMachineTemplateKind, Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, }, }, @@ -474,7 +474,7 @@ func TestReconcileMachinePoolDeleteExternal(t *testing.T) { }, Spec: expv1.MachinePoolSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ @@ -577,7 +577,7 @@ func TestRemoveMachinePoolFinalizerAfterDeleteReconcile(t *testing.T) { }, Spec: expv1.MachinePoolSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ @@ -585,7 +585,7 @@ func TestRemoveMachinePoolFinalizerAfterDeleteReconcile(t *testing.T) { Kind: builder.TestInfrastructureMachineTemplateKind, Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, }, }, @@ -654,7 +654,7 @@ func TestMachinePoolConditions(t *testing.T) { }, Spec: expv1.MachinePoolSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ InfrastructureRef: corev1.ObjectReference{ diff --git a/exp/internal/webhooks/machinepool.go b/exp/internal/webhooks/machinepool.go index 312e8aae546e..c13490001464 100644 --- a/exp/internal/webhooks/machinepool.go +++ b/exp/internal/webhooks/machinepool.go @@ -24,7 +24,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -65,11 +65,11 @@ func (webhook *MachinePool) Default(_ context.Context, obj runtime.Object) error m.Labels[clusterv1.ClusterNameLabel] = m.Spec.ClusterName if m.Spec.Replicas == nil { - m.Spec.Replicas = pointer.Int32(1) + m.Spec.Replicas = ptr.To[int32](1) } if m.Spec.MinReadySeconds == nil { - m.Spec.MinReadySeconds = pointer.Int32(0) + m.Spec.MinReadySeconds = ptr.To[int32](0) } if m.Spec.Template.Spec.Bootstrap.ConfigRef != nil && m.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace == "" { diff --git a/exp/internal/webhooks/machinepool_test.go b/exp/internal/webhooks/machinepool_test.go index 14e6e339edf8..ecda5dadcd42 100644 --- a/exp/internal/webhooks/machinepool_test.go +++ b/exp/internal/webhooks/machinepool_test.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -50,7 +50,7 @@ func TestMachinePoolDefault(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{}}, - Version: pointer.String("1.20.0"), + Version: ptr.To("1.20.0"), }, }, }, @@ -60,11 +60,11 @@ func TestMachinePoolDefault(t *testing.T) { g.Expect(webhook.Default(ctx, mp)).To(Succeed()) g.Expect(mp.Labels[clusterv1.ClusterNameLabel]).To(Equal(mp.Spec.ClusterName)) - g.Expect(mp.Spec.Replicas).To(Equal(pointer.Int32(1))) - g.Expect(mp.Spec.MinReadySeconds).To(Equal(pointer.Int32(0))) + g.Expect(mp.Spec.Replicas).To(Equal(ptr.To[int32](1))) + g.Expect(mp.Spec.MinReadySeconds).To(Equal(ptr.To[int32](0))) g.Expect(mp.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace).To(Equal(mp.Namespace)) 
g.Expect(mp.Spec.Template.Spec.InfrastructureRef.Namespace).To(Equal(mp.Namespace)) - g.Expect(mp.Spec.Template.Spec.Version).To(Equal(pointer.String("v1.20.0"))) + g.Expect(mp.Spec.Template.Spec.Version).To(Equal(ptr.To("v1.20.0"))) } func TestMachinePoolBootstrapValidation(t *testing.T) { @@ -83,7 +83,7 @@ func TestMachinePoolBootstrapValidation(t *testing.T) { }, { name: "should not return error if dataSecretName is set", - bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: pointer.String("test")}, + bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: ptr.To("test")}, expectErr: false, }, { diff --git a/exp/ipam/internal/webhooks/ipaddress_test.go b/exp/ipam/internal/webhooks/ipaddress_test.go index 4968bc9c13ab..909c7721d8d9 100644 --- a/exp/ipam/internal/webhooks/ipaddress_test.go +++ b/exp/ipam/internal/webhooks/ipaddress_test.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -45,7 +45,7 @@ func TestIPAddressValidateCreate(t *testing.T) { PoolRef: corev1.TypedLocalObjectReference{ Kind: "TestPool", Name: "pool", - APIGroup: pointer.String("ipam.cluster.x-k8s.io"), + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), }, }, } diff --git a/exp/ipam/internal/webhooks/ipaddressclaim_test.go b/exp/ipam/internal/webhooks/ipaddressclaim_test.go index 596f92fcdc29..2f9f5025a6d0 100644 --- a/exp/ipam/internal/webhooks/ipaddressclaim_test.go +++ b/exp/ipam/internal/webhooks/ipaddressclaim_test.go @@ -22,7 +22,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" ) @@ -34,7 +34,7 @@ func TestIPAddressClaimValidateCreate(t *testing.T) { PoolRef: corev1.TypedLocalObjectReference{ Name: "identical", Kind: "TestPool", - APIGroup: pointer.String("ipam.cluster.x-k8s.io"), + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), }, }, } diff --git a/exp/runtime/internal/controllers/extensionconfig_controller_test.go b/exp/runtime/internal/controllers/extensionconfig_controller_test.go index 5e8da6a9e9d8..49c8cccd483f 100644 --- a/exp/runtime/internal/controllers/extensionconfig_controller_test.go +++ b/exp/runtime/internal/controllers/extensionconfig_controller_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -378,7 +378,7 @@ func fakeExtensionConfigForURL(namespace, name, url string) *runtimev1.Extension }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: pointer.String(url), + URL: ptr.To(url), }, NamespaceSelector: nil, }, diff --git a/go.mod b/go.mod index f6ff7f6c0a96..cc7985bce1ca 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( k8s.io/klog/v2 v2.100.1 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 k8s.io/kubectl v0.28.4 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 + k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 0c2b747de712..d89d7eb995ab 100644 --- a/go.sum +++ b/go.sum @@ -856,8 +856,8 @@ k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= k8s.io/kubectl v0.28.4/go.mod 
h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= k8s.io/metrics v0.28.4 h1:u36fom9+6c8jX2sk8z58H0hFaIUfrPWbXIxN7GT2blk= k8s.io/metrics v0.28.4/go.mod h1:bBqAJxH20c7wAsTQxDXOlVqxGMdce49d7WNr1WeaLac= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/hack/tools/go.mod b/hack/tools/go.mod index 1cbfde3c590e..cea414493f34 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -21,7 +21,7 @@ require ( k8s.io/klog/v2 v2.100.1 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 k8s.io/kubectl v0.28.4 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 + k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000 sigs.k8s.io/controller-runtime v0.16.3 diff --git a/hack/tools/go.sum b/hack/tools/go.sum index c0e99bcd3562..9d3e358a99b7 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -492,8 +492,8 @@ k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ= k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c= k8s.io/metrics v0.28.4 h1:u36fom9+6c8jX2sk8z58H0hFaIUfrPWbXIxN7GT2blk= k8s.io/metrics v0.28.4/go.mod h1:bBqAJxH20c7wAsTQxDXOlVqxGMdce49d7WNr1WeaLac= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI= diff --git a/hack/tools/internal/tilt-prepare/main.go b/hack/tools/internal/tilt-prepare/main.go index 9b914ed68621..bd97f5111259 100644 --- a/hack/tools/internal/tilt-prepare/main.go +++ b/hack/tools/internal/tilt-prepare/main.go @@ -44,7 +44,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/kustomize/api/types" @@ -75,22 +75,22 @@ var ( // which is containing "hard-coded" tilt-provider.yaml files for the providers managed in the Cluster API repository. providers = map[string]tiltProviderConfig{ "core": { - Context: pointer.String("."), + Context: ptr.To("."), }, "kubeadm-bootstrap": { - Context: pointer.String("bootstrap/kubeadm"), + Context: ptr.To("bootstrap/kubeadm"), }, "kubeadm-control-plane": { - Context: pointer.String("controlplane/kubeadm"), + Context: ptr.To("controlplane/kubeadm"), }, "docker": { - Context: pointer.String("test/infrastructure/docker"), + Context: ptr.To("test/infrastructure/docker"), }, "in-memory": { - Context: pointer.String("test/infrastructure/inmemory"), + Context: ptr.To("test/infrastructure/inmemory"), }, "test-extension": { - Context: pointer.String("test/extension"), + Context: ptr.To("test/extension"), }, } @@ -152,7 +152,7 @@ func init() { func main() { // Set clusterctl logger with a log level of 5. // This makes it easier to see what clusterctl is doing and to debug it. 
- logf.SetLogger(logf.NewLogger(logf.WithThreshold(pointer.Int(5)))) + logf.SetLogger(logf.NewLogger(logf.WithThreshold(ptr.To(5)))) // Set controller-runtime logger as well. ctrl.SetLogger(klog.Background()) @@ -212,19 +212,19 @@ func readTiltSettings(path string) (*tiltSettings, error) { // setTiltSettingsDefaults sets default values for tiltSettings info. func setTiltSettingsDefaults(ts *tiltSettings) { if ts.DeployCertManager == nil { - ts.DeployCertManager = pointer.Bool(true) + ts.DeployCertManager = ptr.To(true) } for k := range ts.Debug { p := ts.Debug[k] if p.Continue == nil { - p.Continue = pointer.Bool(true) + p.Continue = ptr.To(true) } if p.Port == nil { - p.Port = pointer.Int(0) + p.Port = ptr.To(0) } if p.ProfilerPort == nil { - p.ProfilerPort = pointer.Int(0) + p.ProfilerPort = ptr.To(0) } ts.Debug[k] = p @@ -358,7 +358,7 @@ func loadTiltProvider(providerRepository string) (map[string]tiltProviderConfig, } // Resolving context, that is a relative path to the repository where the tilt-provider is defined - contextPath := filepath.Join(providerRepository, pointer.StringDeref(p.Config.Context, ".")) + contextPath := filepath.Join(providerRepository, ptr.Deref(p.Config.Context, ".")) ret[p.Name] = tiltProviderConfig{ Context: &contextPath, @@ -929,7 +929,7 @@ func getProviderObj(version *string) func(prefix string, objs []unstructured.Uns }, ProviderName: providerName, Type: providerType, - Version: pointer.StringDeref(version, defaultProviderVersion), + Version: ptr.Deref(version, defaultProviderVersion), } providerObj := &unstructured.Unstructured{} diff --git a/internal/contract/controlplane.go b/internal/contract/controlplane.go index 7bbc306e7997..53e0a911cb7c 100644 --- a/internal/contract/controlplane.go +++ b/internal/contract/controlplane.go @@ -22,7 +22,7 @@ import ( "github.com/blang/semver/v4" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" 
"sigs.k8s.io/cluster-api/util/version" ) @@ -259,7 +259,7 @@ func (c *ControlPlaneContract) IsScaling(obj *unstructured.Unstructured) (bool, // * This is because the patchHelper marshals before/after object to JSON to calculate a diff // and as the unavailableReplicas field is not a pointer, not set and 0 are both rendered as 0. // If before/after of the field is the same (i.e. 0), there is no diff and thus also no patch to set it to 0. - unavailableReplicas = pointer.Int64(0) + unavailableReplicas = ptr.To[int64](0) } // Control plane is still scaling if: diff --git a/internal/controllers/cluster/cluster_controller_phases.go b/internal/controllers/cluster/cluster_controller_phases.go index 4266a212d04e..c70d3389d4bd 100644 --- a/internal/controllers/cluster/cluster_controller_phases.go +++ b/internal/controllers/cluster/cluster_controller_phases.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -68,7 +68,7 @@ func (r *Reconciler) reconcilePhase(_ context.Context, cluster *clusterv1.Cluste if preReconcilePhase != cluster.Status.GetTypedPhase() { // Failed clusters should get a Warning event if cluster.Status.GetTypedPhase() == clusterv1.ClusterPhaseFailed { - r.recorder.Eventf(cluster, corev1.EventTypeWarning, string(cluster.Status.GetTypedPhase()), "Cluster %s is %s: %s", cluster.Name, string(cluster.Status.GetTypedPhase()), pointer.StringDeref(cluster.Status.FailureMessage, "unknown")) + r.recorder.Eventf(cluster, corev1.EventTypeWarning, string(cluster.Status.GetTypedPhase()), "Cluster %s is %s: %s", cluster.Name, string(cluster.Status.GetTypedPhase()), ptr.Deref(cluster.Status.FailureMessage, "unknown")) } else { r.recorder.Eventf(cluster, corev1.EventTypeNormal, 
string(cluster.Status.GetTypedPhase()), "Cluster %s is %s", cluster.Name, string(cluster.Status.GetTypedPhase())) } @@ -137,7 +137,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C cluster.Status.FailureReason = &clusterStatusError } if failureMessage != "" { - cluster.Status.FailureMessage = pointer.String( + cluster.Status.FailureMessage = ptr.To( fmt.Sprintf("Failure detected from referenced resource %v with name %q: %s", obj.GroupVersionKind(), obj.GetName(), failureMessage), ) diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index f6a35071c5a0..7e505b7d13b8 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -24,7 +24,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -315,13 +315,13 @@ func TestClusterReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: cluster.Name, - ProviderID: pointer.String("aws:///id-node-1"), + ProviderID: ptr.To("aws:///id-node-1"), Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String(""), + DataSecretName: ptr.To(""), }, }, } - machine.Spec.Bootstrap.DataSecretName = pointer.String("test6-bootstrapdata") + machine.Spec.Bootstrap.DataSecretName = ptr.To("test6-bootstrapdata") g.Expect(env.Create(ctx, machine)).To(Succeed()) key = client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} defer func() { diff --git a/internal/controllers/clusterclass/clusterclass_controller_test.go b/internal/controllers/clusterclass/clusterclass_controller_test.go index 1c972f1ab1f2..cf1614c39dd4 100644 --- 
a/internal/controllers/clusterclass/clusterclass_controller_test.go +++ b/internal/controllers/clusterclass/clusterclass_controller_test.go @@ -30,7 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -394,7 +394,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: pointer.String("variables-one"), + DiscoverVariablesExtension: ptr.To("variables-one"), }}}). Build(), patchResponse: &runtimehooksv1.DiscoverVariablesResponse{ @@ -497,7 +497,7 @@ func TestReconciler_reconcileVariables(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - DiscoverVariablesExtension: pointer.String("variables-one"), + DiscoverVariablesExtension: ptr.To("variables-one"), }}}). Build(), patchResponse: &runtimehooksv1.DiscoverVariablesResponse{ @@ -579,18 +579,18 @@ func TestReconciler_extensionConfigToClusterClass(t *testing.T) { // These ClusterClasses will be reconciled as they both reference the passed ExtensionConfig `runtime1`. onePatchClusterClass := builder.ClusterClass(metav1.NamespaceDefault, "cc1"). WithPatches([]clusterv1.ClusterClassPatch{ - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: pointer.String("discover-variables.runtime1")}}}). + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.runtime1")}}}). Build() twoPatchClusterClass := builder.ClusterClass(metav1.NamespaceDefault, "cc2"). 
WithPatches([]clusterv1.ClusterClassPatch{ - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: pointer.String("discover-variables.runtime1")}}, - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: pointer.String("discover-variables.runtime2")}}}). + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.runtime1")}}, + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.runtime2")}}}). Build() // This ClusterClasses will not be reconciled as it does not reference the passed ExtensionConfig `runtime1`. notReconciledClusterClass := builder.ClusterClass(metav1.NamespaceDefault, "cc3"). WithPatches([]clusterv1.ClusterClassPatch{ - {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: pointer.String("discover-variables.other-runtime-class")}}}). + {External: &clusterv1.ExternalPatchDefinition{DiscoverVariablesExtension: ptr.To("discover-variables.other-runtime-class")}}}). Build() t.Run("test", func(t *testing.T) { diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 0bcaaa1d060d..70a9d0853f75 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -26,7 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -239,7 +239,7 @@ func TestNodeLabelSync(t *testing.T) { machine := defaultMachine.DeepCopy() machine.Namespace = ns.Name - machine.Spec.ProviderID = pointer.String(nodeProviderID) + machine.Spec.ProviderID = ptr.To(nodeProviderID) // Set Machine labels. 
machine.Labels = map[string]string{} diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go index 5e34f8904444..f24d38f02f9c 100644 --- a/internal/controllers/machine/machine_controller_phases.go +++ b/internal/controllers/machine/machine_controller_phases.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -156,7 +156,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C m.Status.FailureReason = &machineStatusError } if failureMessage != "" { - m.Status.FailureMessage = pointer.String( + m.Status.FailureMessage = ptr.To( fmt.Sprintf("Failure detected from referenced resource %v with name %q: %s", obj.GroupVersionKind(), obj.GetName(), failureMessage), ) @@ -230,7 +230,7 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Res } else if secretName == "" { return ctrl.Result{}, errors.Errorf("retrieved empty dataSecretName from bootstrap provider for Machine %q in namespace %q", m.Name, m.Namespace) } - m.Spec.Bootstrap.DataSecretName = pointer.String(secretName) + m.Spec.Bootstrap.DataSecretName = ptr.To(secretName) if !m.Status.BootstrapReady { log.Info("Bootstrap provider generated data secret and reports status.ready", bootstrapConfig.GetKind(), klog.KObj(bootstrapConfig), "Secret", klog.KRef(m.Namespace, secretName)) } @@ -255,7 +255,7 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr if m.Status.InfrastructureReady { log.Error(err, "Machine infrastructure reference has been deleted after being ready, setting failure state") m.Status.FailureReason = 
capierrors.MachineStatusErrorPtr(capierrors.InvalidConfigurationMachineError) - m.Status.FailureMessage = pointer.String(fmt.Sprintf("Machine infrastructure resource %v with name %q has been deleted after being ready", + m.Status.FailureMessage = ptr.To(fmt.Sprintf("Machine infrastructure resource %v with name %q has been deleted after being ready", m.Spec.InfrastructureRef.GroupVersionKind(), m.Spec.InfrastructureRef.Name)) return ctrl.Result{}, errors.Errorf("could not find %v %q for Machine %q in namespace %q, requeuing", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, m.Name, m.Namespace) } @@ -315,10 +315,10 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr case err != nil: return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve failure domain from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace) default: - m.Spec.FailureDomain = pointer.String(failureDomain) + m.Spec.FailureDomain = ptr.To(failureDomain) } - m.Spec.ProviderID = pointer.String(providerID) + m.Spec.ProviderID = ptr.To(providerID) return ctrl.Result{}, nil } diff --git a/internal/controllers/machine/machine_controller_phases_test.go b/internal/controllers/machine/machine_controller_phases_test.go index efbff31dab1e..7334716d0f7c 100644 --- a/internal/controllers/machine/machine_controller_phases_test.go +++ b/internal/controllers/machine/machine_controller_phases_test.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -495,7 +495,7 @@ func TestReconcileMachinePhases(t *testing.T) { machine := defaultMachine.DeepCopy() machine.Namespace = ns.Name // Set Machine ProviderID. 
- machine.Spec.ProviderID = pointer.String(nodeProviderID) + machine.Spec.ProviderID = ptr.To(nodeProviderID) g.Expect(env.Create(ctx, cluster)).To(Succeed()) defaultKubeconfigSecret = kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster)) @@ -781,7 +781,7 @@ func TestReconcileBootstrap(t *testing.T) { Kind: "GenericBootstrapConfig", Name: "bootstrap-config1", }, - DataSecretName: pointer.String("secret-data"), + DataSecretName: ptr.To("secret-data"), }, }, Status: clusterv1.MachineStatus{ diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index 62ecfcba8245..c3cd3566dff8 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -456,7 +456,7 @@ func TestMachineOwnerReference(t *testing.T) { APIVersion: clusterv1.GroupVersion.String(), Kind: "MachineSet", Name: "valid-machineset", - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -481,7 +481,7 @@ func TestMachineOwnerReference(t *testing.T) { APIVersion: "test.group", Kind: "KubeadmControlPlane", Name: "valid-controlplane", - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -525,7 +525,7 @@ func TestMachineOwnerReference(t *testing.T) { APIVersion: clusterv1.GroupVersion.String(), Kind: "MachineSet", Name: "valid-machineset", - Controller: pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -540,7 +540,7 @@ func TestMachineOwnerReference(t *testing.T) { APIVersion: "test.group", Kind: "KubeadmControlPlane", Name: "valid-controlplane", - Controller: 
pointer.Bool(true), + Controller: ptr.To(true), }, }, }, @@ -647,7 +647,7 @@ func TestReconcileRequest(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -675,7 +675,7 @@ func TestReconcileRequest(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -707,7 +707,7 @@ func TestReconcileRequest(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, }, expected: expected{ @@ -812,7 +812,7 @@ func TestMachineConditions(t *testing.T) { Finalizers: []string{clusterv1.MachineFinalizer}, }, Spec: clusterv1.MachineSpec{ - ProviderID: pointer.String("test://id-1"), + ProviderID: ptr.To("test://id-1"), ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", @@ -1110,7 +1110,7 @@ func TestRemoveMachineFinalizerAfterDeleteReconcile(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, } key := client.ObjectKey{Namespace: m.Namespace, Name: m.Name} @@ -1150,7 +1150,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: 
clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{}, }, @@ -1167,7 +1167,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, NodeDrainTimeout: &metav1.Duration{Duration: time.Second * 60}, }, @@ -1194,7 +1194,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, NodeDrainTimeout: &metav1.Duration{Duration: time.Second * 60}, }, Status: clusterv1.MachineStatus{ @@ -1220,7 +1220,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ Conditions: clusterv1.Conditions{ @@ -1277,7 +1277,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{}, }, @@ -1294,7 +1294,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, NodeVolumeDetachTimeout: &metav1.Duration{Duration: 
time.Second * 30}, }, @@ -1321,7 +1321,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 60}, }, Status: clusterv1.MachineStatus{ @@ -1347,7 +1347,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ Conditions: clusterv1.Conditions{ @@ -1410,7 +1410,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{}, }, @@ -1436,7 +1436,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1468,7 +1468,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1498,7 +1498,7 @@ func 
TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1549,7 +1549,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1587,7 +1587,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1625,7 +1625,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1689,7 +1689,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -1709,7 +1709,7 @@ func TestIsDeleteNodeAllowed(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", 
InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -2061,7 +2061,7 @@ func TestNodeDeletion(t *testing.T) { Kind: "GenericInfrastructureMachine", Name: "infra-config1", }, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ @@ -2083,7 +2083,7 @@ func TestNodeDeletion(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", InfrastructureRef: corev1.ObjectReference{}, - Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")}, + Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ NodeRef: &corev1.ObjectReference{ diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index 94161271a893..8c6015511d3d 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -90,9 +90,9 @@ func TestMachineDeploymentReconciler(t *testing.T) { }, Spec: clusterv1.MachineDeploymentSpec{ ClusterName: testCluster.Name, - MinReadySeconds: pointer.Int32(0), - Replicas: pointer.Int32(2), - RevisionHistoryLimit: pointer.Int32(0), + MinReadySeconds: ptr.To[int32](0), + Replicas: ptr.To[int32](2), + 
RevisionHistoryLimit: ptr.To[int32](0), Selector: metav1.LabelSelector{ // We're using the same labels for spec.selector and spec.template.labels. // The labels are later changed and we will use the initial labels later to @@ -104,7 +104,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ MaxUnavailable: intOrStrPtr(0), MaxSurge: intOrStrPtr(1), - DeletePolicy: pointer.String("Oldest"), + DeletePolicy: ptr.To("Oldest"), }, }, Template: clusterv1.MachineTemplateSpec{ @@ -120,7 +120,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { Name: "md-template", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name"), + DataSecretName: ptr.To("data-secret-name"), }, }, }, @@ -256,7 +256,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { t.Log("Scaling the MachineDeployment to 3 replicas") desiredMachineDeploymentReplicas := int32(3) modifyFunc := func(d *clusterv1.MachineDeployment) { - d.Spec.Replicas = pointer.Int32(desiredMachineDeploymentReplicas) + d.Spec.Replicas = ptr.To[int32](desiredMachineDeploymentReplicas) } g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) g.Eventually(func() int { @@ -365,7 +365,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { // expect the Reconcile to be called and the MachineSet to be updated in-place. 
t.Log("Updating deletePolicy on the MachineDeployment") modifyFunc = func(d *clusterv1.MachineDeployment) { - d.Spec.Strategy.RollingUpdate.DeletePolicy = pointer.String("Newest") + d.Spec.Strategy.RollingUpdate.DeletePolicy = ptr.To("Newest") } g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) g.Eventually(func(g Gomega) { @@ -514,9 +514,9 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi Spec: clusterv1.MachineDeploymentSpec{ Paused: true, // Set this to true as we do not want to test the other parts of the reconciler in this test. ClusterName: testCluster.Name, - MinReadySeconds: pointer.Int32(0), - Replicas: pointer.Int32(2), - RevisionHistoryLimit: pointer.Int32(0), + MinReadySeconds: ptr.To[int32](0), + Replicas: ptr.To[int32](2), + RevisionHistoryLimit: ptr.To[int32](0), Selector: metav1.LabelSelector{ // We're using the same labels for spec.selector and spec.template.labels. MatchLabels: labels, @@ -526,7 +526,7 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ MaxUnavailable: intOrStrPtr(0), MaxSurge: intOrStrPtr(1), - DeletePolicy: pointer.String("Oldest"), + DeletePolicy: ptr.To("Oldest"), }, }, Template: clusterv1.MachineTemplateSpec{ @@ -542,7 +542,7 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi Name: "md-template", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name"), + DataSecretName: ptr.To("data-secret-name"), }, }, }, @@ -595,7 +595,7 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi }, Spec: clusterv1.MachineSetSpec{ ClusterName: testCluster.Name, - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), MinReadySeconds: 0, Selector: metav1.LabelSelector{ MatchLabels: labels, @@ -612,7 +612,7 @@ func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testi Name: 
"md-template", }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name"), + DataSecretName: ptr.To("data-secret-name"), }, Version: &version, }, diff --git a/internal/controllers/machinedeployment/machinedeployment_rolling_test.go b/internal/controllers/machinedeployment/machinedeployment_rolling_test.go index 5a4d3895c580..745e122b72b3 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rolling_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_rolling_test.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -51,7 +51,7 @@ func TestReconcileNewMachineSet(t *testing.T) { }, newMachineSet: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, error: errors.Errorf("spec.replicas for MachineDeployment foo/bar is nil, this is unexpected"), @@ -60,7 +60,7 @@ func TestReconcileNewMachineSet(t *testing.T) { name: "It fails when new machineSet has no replicas", machineDeployment: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -86,7 +86,7 @@ func TestReconcileNewMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(2), }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -95,7 +95,7 @@ func TestReconcileNewMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, }, expectedNewMachineSetReplicas: 2, @@ -115,7 +115,7 @@ func TestReconcileNewMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(2), }, }, - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, }, 
newMachineSet: &clusterv1.MachineSet{ @@ -124,7 +124,7 @@ func TestReconcileNewMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, expectedNewMachineSetReplicas: 0, @@ -144,7 +144,7 @@ func TestReconcileNewMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(2), }, }, - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -153,7 +153,7 @@ func TestReconcileNewMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, }, expectedNewMachineSetReplicas: 2, @@ -164,7 +164,7 @@ func TestReconcileNewMachineSet(t *testing.T) { Name: "3replicas", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), }, Status: clusterv1.MachineSetStatus{ Replicas: 3, @@ -188,7 +188,7 @@ func TestReconcileNewMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(0), }, }, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -197,7 +197,7 @@ func TestReconcileNewMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, }, expectedNewMachineSetReplicas: 0, @@ -208,7 +208,7 @@ func TestReconcileNewMachineSet(t *testing.T) { Name: "machine-not-yet-deleted", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, Status: clusterv1.MachineSetStatus{ Replicas: 1, @@ -282,7 +282,7 @@ func TestReconcileOldMachineSets(t *testing.T) { }, newMachineSet: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, error: errors.Errorf("spec.replicas for MachineDeployment foo/bar is nil, this is unexpected"), @@ -291,7 +291,7 @@ func TestReconcileOldMachineSets(t *testing.T) { name: "It fails when new machineSet has no 
replicas", machineDeployment: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -317,7 +317,7 @@ func TestReconcileOldMachineSets(t *testing.T) { MaxSurge: intOrStrPtr(3), }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -326,7 +326,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, Status: clusterv1.MachineSetStatus{ AvailableReplicas: 2, @@ -339,7 +339,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Name: "2replicas", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ AvailableReplicas: 2, @@ -351,7 +351,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Name: "1replicas", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, Status: clusterv1.MachineSetStatus{ AvailableReplicas: 1, @@ -375,7 +375,7 @@ func TestReconcileOldMachineSets(t *testing.T) { MaxSurge: intOrStrPtr(3), }, }, - Replicas: pointer.Int32(10), + Replicas: ptr.To[int32](10), }, }, newMachineSet: &clusterv1.MachineSet{ @@ -384,7 +384,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(5), + Replicas: ptr.To[int32](5), }, Status: clusterv1.MachineSetStatus{ Replicas: 5, @@ -399,7 +399,7 @@ func TestReconcileOldMachineSets(t *testing.T) { Name: "8replicas", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(8), + Replicas: ptr.To[int32](8), }, Status: clusterv1.MachineSetStatus{ Replicas: 10, diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 5482c1a6debb..2d5df9f852da 100644 --- 
a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -334,9 +334,9 @@ func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeplo desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) // Set all other in-place mutable fields. - desiredMS.Spec.MinReadySeconds = pointer.Int32Deref(deployment.Spec.MinReadySeconds, 0) + desiredMS.Spec.MinReadySeconds = ptr.Deref(deployment.Spec.MinReadySeconds, 0) if deployment.Spec.Strategy != nil && deployment.Spec.Strategy.RollingUpdate != nil { - desiredMS.Spec.DeletePolicy = pointer.StringDeref(deployment.Spec.Strategy.RollingUpdate.DeletePolicy, "") + desiredMS.Spec.DeletePolicy = ptr.Deref(deployment.Spec.Strategy.RollingUpdate.DeletePolicy, "") } else { desiredMS.Spec.DeletePolicy = "" } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index 5aa03be75c94..d8a99f12cb13 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -31,7 +31,7 @@ import ( apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/record" "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -53,7 +53,7 @@ func TestCalculateStatus(t *testing.T) { "all machines are running": { machineSets: []*clusterv1.MachineSet{{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: 
"", @@ -65,7 +65,7 @@ func TestCalculateStatus(t *testing.T) { }}, newMachineSet: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -80,7 +80,7 @@ func TestCalculateStatus(t *testing.T) { Generation: 2, }, Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, expectedStatus: clusterv1.MachineDeploymentStatus{ @@ -96,7 +96,7 @@ func TestCalculateStatus(t *testing.T) { "scaling up": { machineSets: []*clusterv1.MachineSet{{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -108,7 +108,7 @@ func TestCalculateStatus(t *testing.T) { }}, newMachineSet: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -123,7 +123,7 @@ func TestCalculateStatus(t *testing.T) { Generation: 2, }, Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, expectedStatus: clusterv1.MachineDeploymentStatus{ @@ -139,7 +139,7 @@ func TestCalculateStatus(t *testing.T) { "scaling down": { machineSets: []*clusterv1.MachineSet{{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -151,7 +151,7 @@ func TestCalculateStatus(t *testing.T) { }}, newMachineSet: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -166,7 +166,7 @@ func TestCalculateStatus(t *testing.T) { Generation: 2, }, Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, expectedStatus: clusterv1.MachineDeploymentStatus{ @@ -182,7 +182,7 @@ func 
TestCalculateStatus(t *testing.T) { "MachineSet failed": { machineSets: []*clusterv1.MachineSet{{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -195,7 +195,7 @@ func TestCalculateStatus(t *testing.T) { }}, newMachineSet: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Status: clusterv1.MachineSetStatus{ Selector: "", @@ -210,7 +210,7 @@ func TestCalculateStatus(t *testing.T) { Generation: 2, }, Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, expectedStatus: clusterv1.MachineDeploymentStatus{ @@ -247,7 +247,7 @@ func TestScaleMachineSet(t *testing.T) { name: "It fails when new MachineSet has no replicas", machineDeployment: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, machineSet: &clusterv1.MachineSet{ @@ -273,7 +273,7 @@ func TestScaleMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, error: errors.Errorf("spec.replicas for MachineDeployment foo/bar is nil, this is unexpected"), @@ -293,7 +293,7 @@ func TestScaleMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(2), }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, machineSet: &clusterv1.MachineSet{ @@ -302,7 +302,7 @@ func TestScaleMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(0), + Replicas: ptr.To[int32](0), }, }, newScale: 2, @@ -322,7 +322,7 @@ func TestScaleMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(2), }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, machineSet: &clusterv1.MachineSet{ @@ -331,7 +331,7 @@ func TestScaleMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: 
pointer.Int32(4), + Replicas: ptr.To[int32](4), }, }, newScale: 2, @@ -351,7 +351,7 @@ func TestScaleMachineSet(t *testing.T) { MaxSurge: intOrStrPtr(2), }, }, - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, machineSet: &clusterv1.MachineSet{ @@ -360,7 +360,7 @@ func TestScaleMachineSet(t *testing.T) { Name: "bar", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, newScale: 2, @@ -416,7 +416,7 @@ func newTestMachineDeployment(pds *int32, replicas, statusReplicas, updatedRepli RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ MaxUnavailable: intOrStrPtr(0), MaxSurge: intOrStrPtr(1), - DeletePolicy: pointer.String("Oldest"), + DeletePolicy: ptr.To("Oldest"), }, }, }, @@ -439,7 +439,7 @@ func newTestMachinesetWithReplicas(name string, specReplicas, statusReplicas, av Namespace: metav1.NamespaceDefault, }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(specReplicas), + Replicas: ptr.To[int32](specReplicas), }, Status: clusterv1.MachineSetStatus{ AvailableReplicas: availableReplicas, @@ -523,13 +523,13 @@ func TestComputeDesiredMachineSet(t *testing.T) { }, Spec: clusterv1.MachineDeploymentSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(3), - MinReadySeconds: pointer.Int32(10), + Replicas: ptr.To[int32](3), + MinReadySeconds: ptr.To[int32](10), Strategy: &clusterv1.MachineDeploymentStrategy{ Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ MaxSurge: intOrStrPtr(1), - DeletePolicy: pointer.String("Random"), + DeletePolicy: ptr.To("Random"), MaxUnavailable: intOrStrPtr(0), }, }, @@ -542,7 +542,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { Annotations: map[string]string{"machine-annotation1": "machine-value1"}, }, Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.25.3"), + Version: ptr.To("v1.25.3"), InfrastructureRef: infraRef, Bootstrap: clusterv1.Bootstrap{ ConfigRef: &bootstrapRef, @@ 
-565,7 +565,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { }, Spec: clusterv1.MachineSetSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), MinReadySeconds: 10, DeletePolicy: string(clusterv1.RandomMachineSetDeletePolicy), Selector: metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, @@ -584,10 +584,10 @@ func TestComputeDesiredMachineSet(t *testing.T) { t.Run("should compute a new MachineSet when old MachineSets exist", func(t *testing.T) { oldMS := skeletonMSBasedOnMD.DeepCopy() - oldMS.Spec.Replicas = pointer.Int32(2) + oldMS.Spec.Replicas = ptr.To[int32](2) expectedMS := skeletonMSBasedOnMD.DeepCopy() - expectedMS.Spec.Replicas = pointer.Int32(2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2 + expectedMS.Spec.Replicas = ptr.To[int32](2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2 g := NewWithT(t) actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, []*clusterv1.MachineSet{oldMS}, log) @@ -655,7 +655,7 @@ func TestComputeDesiredMachineSet(t *testing.T) { existingMS.Spec.MinReadySeconds = 0 oldMS := skeletonMSBasedOnMD.DeepCopy() - oldMS.Spec.Replicas = pointer.Int32(2) + oldMS.Spec.Replicas = ptr.To[int32](2) // Note: computeDesiredMachineSet does not modify the replicas on the updated MachineSet. 
// Therefore, even though we have the old machineset with replicas 2 the updatedMS does not diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index 8154e1dd52c7..3062c1626461 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -78,7 +78,7 @@ func generateDeployment(image string) clusterv1.MachineDeployment { Annotations: make(map[string]string), }, Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Selector: metav1.LabelSelector{MatchLabels: machineLabels}, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ @@ -101,7 +101,7 @@ func TestMachineSetsByDecreasingReplicas(t *testing.T) { Name: "ms-a", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, } @@ -111,7 +111,7 @@ func TestMachineSetsByDecreasingReplicas(t *testing.T) { Name: "ms-aa", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), }, } @@ -121,7 +121,7 @@ func TestMachineSetsByDecreasingReplicas(t *testing.T) { Name: "ms-b", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, } @@ -131,7 +131,7 @@ func TestMachineSetsByDecreasingReplicas(t *testing.T) { Name: "ms-a", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, } @@ -316,7 +316,7 @@ func TestFindNewMachineSet(t *testing.T) { matchingMS := generateMS(deployment) matchingMSHigherReplicas := generateMS(deployment) - matchingMSHigherReplicas.Spec.Replicas = pointer.Int32(2) + matchingMSHigherReplicas.Spec.Replicas = 
ptr.To[int32](2) matchingMSDiffersInPlaceMutableFields := generateMS(deployment) matchingMSDiffersInPlaceMutableFields.Spec.Template.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second} @@ -407,13 +407,13 @@ func TestFindOldMachineSets(t *testing.T) { newMS := generateMS(deployment) newMS.Name = "aa" - newMS.Spec.Replicas = pointer.Int32(1) + newMS.Spec.Replicas = ptr.To[int32](1) newMSHigherReplicas := generateMS(deployment) - newMSHigherReplicas.Spec.Replicas = pointer.Int32(2) + newMSHigherReplicas.Spec.Replicas = ptr.To[int32](2) newMSHigherName := generateMS(deployment) - newMSHigherName.Spec.Replicas = pointer.Int32(1) + newMSHigherName.Spec.Replicas = ptr.To[int32](1) newMSHigherName.Name = "ab" oldDeployment := generateDeployment("nginx") @@ -798,7 +798,7 @@ func TestMaxUnavailable(t *testing.T) { func TestAnnotationUtils(t *testing.T) { // Setup tDeployment := generateDeployment("nginx") - tDeployment.Spec.Replicas = pointer.Int32(1) + tDeployment.Spec.Replicas = ptr.To[int32](1) tMS := generateMS(tDeployment) // Test Case 1: Check if annotations are set properly @@ -825,7 +825,7 @@ func TestAnnotationUtils(t *testing.T) { func TestComputeMachineSetAnnotations(t *testing.T) { deployment := generateDeployment("nginx") - deployment.Spec.Replicas = pointer.Int32(3) + deployment.Spec.Replicas = ptr.To[int32](3) maxSurge := intstr.FromInt(1) maxUnavailable := intstr.FromInt(0) deployment.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{ diff --git a/internal/controllers/machinedeployment/suite_test.go b/internal/controllers/machinedeployment/suite_test.go index 8a49c8cb0847..af0b52b175ab 100644 --- a/internal/controllers/machinedeployment/suite_test.go +++ b/internal/controllers/machinedeployment/suite_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl 
"sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -192,7 +192,7 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { // Patch the Machine. patchMachine := client.MergeFrom(m.DeepCopy()) - m.Spec.ProviderID = pointer.String(pid) + m.Spec.ProviderID = ptr.To(pid) g.Expect(env.Patch(ctx, m, patchMachine)).To(Succeed()) patchMachine = client.MergeFrom(m.DeepCopy()) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go index 1e716bc02c1b..fab1d0e34077 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go @@ -35,7 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" @@ -1230,7 +1230,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { }, Spec: clusterv1.MachineSetSpec{ ClusterName: cluster.Name, - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), Selector: mhc.Spec.Selector, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ @@ -1239,7 +1239,7 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { Spec: clusterv1.MachineSpec{ ClusterName: cluster.Name, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("test-data-secret-name"), + DataSecretName: ptr.To("test-data-secret-name"), }, InfrastructureRef: corev1.ObjectReference{ APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", @@ -2313,7 +2313,7 @@ func newRunningMachine(c *clusterv1.Cluster, labels map[string]string) *clusterv Spec: clusterv1.MachineSpec{ ClusterName: c.Name, Bootstrap: 
clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret-name"), + DataSecretName: ptr.To("data-secret-name"), }, }, Status: clusterv1.MachineStatus{ @@ -2496,7 +2496,7 @@ func createMachinesWithNodes( machine.Status.FailureReason = &failureReason } if o.failureMessage != "" { - machine.Status.FailureMessage = pointer.String(o.failureMessage) + machine.Status.FailureMessage = ptr.To(o.failureMessage) } // Adding one second to ensure there is a difference from the diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 65e4e00951ba..ff323b18d52c 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -1172,7 +1172,7 @@ func TestMachineSetReconciler_syncMachines(t *testing.T) { Namespace: namespace.Name, }, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("machine-bootstrap-secret"), + DataSecretName: ptr.To("machine-bootstrap-secret"), }, }, } @@ -1512,7 +1512,7 @@ func TestMachineSetReconciler_syncReplicas(t *testing.T) { Namespace: "default", }, Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, } @@ -1567,7 +1567,7 @@ func TestComputeDesiredMachine(t *testing.T) { }, Spec: clusterv1.MachineSetSpec{ ClusterName: "test-cluster", - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), MinReadySeconds: 10, Selector: metav1.LabelSelector{ MatchLabels: map[string]string{"k1": "v1"}, @@ -1578,7 +1578,7 @@ func TestComputeDesiredMachine(t *testing.T) { Annotations: 
map[string]string{"machine-annotation1": "machine-value1"}, }, Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.25.3"), + Version: ptr.To("v1.25.3"), InfrastructureRef: infraRef, Bootstrap: clusterv1.Bootstrap{ ConfigRef: &bootstrapRef, @@ -1604,7 +1604,7 @@ func TestComputeDesiredMachine(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "test-cluster", - Version: pointer.String("v1.25.3"), + Version: ptr.To("v1.25.3"), NodeDrainTimeout: duration10s, NodeVolumeDetachTimeout: duration10s, NodeDeletionTimeout: duration10s, diff --git a/internal/controllers/machineset/machineset_preflight.go b/internal/controllers/machineset/machineset_preflight.go index 984b8c8042eb..34c92fd6d4a5 100644 --- a/internal/controllers/machineset/machineset_preflight.go +++ b/internal/controllers/machineset/machineset_preflight.go @@ -29,7 +29,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -152,7 +152,7 @@ func (r *Reconciler) controlPlaneStablePreflightCheck(controlPlane *unstructured return nil, errors.Wrapf(err, "failed to perform %q preflight check: failed to check if ControlPlane %s is provisioning", clusterv1.MachineSetPreflightCheckControlPlaneIsStable, cpKlogRef) } if isProvisioning { - return pointer.String(fmt.Sprintf("ControlPlane %s is provisioning (%q preflight failed)", cpKlogRef, clusterv1.MachineSetPreflightCheckControlPlaneIsStable)), nil + return ptr.To(fmt.Sprintf("ControlPlane %s is provisioning (%q preflight failed)", cpKlogRef, clusterv1.MachineSetPreflightCheckControlPlaneIsStable)), nil } // Check that the control plane is not upgrading. 
@@ -161,7 +161,7 @@ func (r *Reconciler) controlPlaneStablePreflightCheck(controlPlane *unstructured return nil, errors.Wrapf(err, "failed to perform %q preflight check: failed to check if the ControlPlane %s is upgrading", clusterv1.MachineSetPreflightCheckControlPlaneIsStable, cpKlogRef) } if isUpgrading { - return pointer.String(fmt.Sprintf("ControlPlane %s is upgrading (%q preflight failed)", cpKlogRef, clusterv1.MachineSetPreflightCheckControlPlaneIsStable)), nil + return ptr.To(fmt.Sprintf("ControlPlane %s is upgrading (%q preflight failed)", cpKlogRef, clusterv1.MachineSetPreflightCheckControlPlaneIsStable)), nil } return nil, nil @@ -173,7 +173,7 @@ func (r *Reconciler) kubernetesVersionPreflightCheck(cpSemver, msSemver semver.V // => MS minor version cannot be outside of the supported skew. // Kubernetes skew policy: https://kubernetes.io/releases/version-skew-policy/#kubelet if msSemver.Minor > cpSemver.Minor { - return pointer.String(fmt.Sprintf("MachineSet version (%s) and ControlPlane version (%s) do not conform to the kubernetes version skew policy as MachineSet version is higher than ControlPlane version (%q preflight failed)", msSemver.String(), cpSemver.String(), clusterv1.MachineSetPreflightCheckKubernetesVersionSkew)) + return ptr.To(fmt.Sprintf("MachineSet version (%s) and ControlPlane version (%s) do not conform to the kubernetes version skew policy as MachineSet version is higher than ControlPlane version (%q preflight failed)", msSemver.String(), cpSemver.String(), clusterv1.MachineSetPreflightCheckKubernetesVersionSkew)) } minorSkew := uint64(3) // For Control Planes running Kubernetes < v1.28, the version skew policy for kubelets is two. 
@@ -181,7 +181,7 @@ func (r *Reconciler) kubernetesVersionPreflightCheck(cpSemver, msSemver semver.V minorSkew = 2 } if msSemver.Minor < cpSemver.Minor-minorSkew { - return pointer.String(fmt.Sprintf("MachineSet version (%s) and ControlPlane version (%s) do not conform to the kubernetes version skew policy as MachineSet version is more than %d minor versions older than the ControlPlane version (%q preflight failed)", msSemver.String(), cpSemver.String(), minorSkew, clusterv1.MachineSetPreflightCheckKubernetesVersionSkew)) + return ptr.To(fmt.Sprintf("MachineSet version (%s) and ControlPlane version (%s) do not conform to the kubernetes version skew policy as MachineSet version is more than %d minor versions older than the ControlPlane version (%q preflight failed)", msSemver.String(), cpSemver.String(), minorSkew, clusterv1.MachineSetPreflightCheckKubernetesVersionSkew)) } return nil @@ -205,7 +205,7 @@ func (r *Reconciler) kubeadmVersionPreflightCheck(cpSemver, msSemver semver.Vers groupVersion.Group == bootstrapv1.GroupVersion.Group if kubeadmBootstrapProviderUsed { if cpSemver.Minor != msSemver.Minor { - return pointer.String(fmt.Sprintf("MachineSet version (%s) and ControlPlane version (%s) do not conform to kubeadm version skew policy as kubeadm only supports joining with the same major+minor version as the control plane (%q preflight failed)", msSemver.String(), cpSemver.String(), clusterv1.MachineSetPreflightCheckKubeadmVersionSkew)), nil + return ptr.To(fmt.Sprintf("MachineSet version (%s) and ControlPlane version (%s) do not conform to kubeadm version skew policy as kubeadm only supports joining with the same major+minor version as the control plane (%q preflight failed)", msSemver.String(), cpSemver.String(), clusterv1.MachineSetPreflightCheckKubeadmVersionSkew)), nil } } return nil, nil diff --git a/internal/controllers/machineset/machineset_preflight_test.go b/internal/controllers/machineset/machineset_preflight_test.go index cd24ae16d144..ba598d0a171e 
100644 --- a/internal/controllers/machineset/machineset_preflight_test.go +++ b/internal/controllers/machineset/machineset_preflight_test.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -182,7 +182,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.26.2"), + Version: ptr.To("v1.26.2"), Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{Kind: "KubeadmConfigTemplate"}}, }, }, @@ -241,7 +241,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.27.0.0"), + Version: ptr.To("v1.27.0.0"), }, }, }, @@ -266,7 +266,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.27.0"), + Version: ptr.To("v1.27.0"), }, }, }, @@ -291,7 +291,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.24.0"), + Version: ptr.To("v1.24.0"), }, }, }, @@ -316,7 +316,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.23.0"), + Version: ptr.To("v1.23.0"), }, }, }, @@ -344,7 +344,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ 
Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.27.0"), + Version: ptr.To("v1.27.0"), }, }, }, @@ -369,7 +369,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.25.0"), + Version: ptr.To("v1.25.0"), }, }, }, @@ -394,7 +394,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.24.0"), + Version: ptr.To("v1.24.0"), }, }, }, @@ -419,7 +419,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.25.5"), + Version: ptr.To("v1.25.5"), Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{ APIVersion: bootstrapv1.GroupVersion.String(), Kind: "KubeadmConfigTemplate", @@ -448,7 +448,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.25.0"), + Version: ptr.To("v1.25.0"), }, }, }, @@ -476,7 +476,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.25.0"), + Version: ptr.To("v1.25.0"), Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{ APIVersion: bootstrapv1.GroupVersion.String(), Kind: "KubeadmConfigTemplate", @@ -505,7 +505,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.26.2"), + Version: ptr.To("v1.26.2"), 
Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{ APIVersion: bootstrapv1.GroupVersion.String(), Kind: "KubeadmConfigTemplate", @@ -534,7 +534,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.26.2"), + Version: ptr.To("v1.26.2"), Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{ APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1/invalid", Kind: "KubeadmConfigTemplate", @@ -590,7 +590,7 @@ func TestMachineSetReconciler_runPreflightChecks(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.26.0"), + Version: ptr.To("v1.26.0"), Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{ APIVersion: bootstrapv1.GroupVersion.String(), Kind: "KubeadmConfigTemplate", diff --git a/internal/controllers/machineset/suite_test.go b/internal/controllers/machineset/suite_test.go index c7fdde445ddf..65a1a880f45f 100644 --- a/internal/controllers/machineset/suite_test.go +++ b/internal/controllers/machineset/suite_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/uuid" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -201,7 +201,7 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { // Patch the Machine. 
patchMachine := client.MergeFrom(m.DeepCopy()) - m.Spec.ProviderID = pointer.String(pid) + m.Spec.ProviderID = ptr.To(pid) g.Expect(env.Patch(ctx, m, patchMachine)).To(Succeed()) patchMachine = client.MergeFrom(m.DeepCopy()) diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go index 6c537489da48..b2d56865a36d 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/internal/controllers/topology/cluster/desired_state.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -733,7 +733,7 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: s.Current.Cluster.Name, - Version: pointer.String(version), + Version: ptr.To(version), Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef}, InfrastructureRef: *desiredInfraMachineTemplateRef, FailureDomain: failureDomain, @@ -1082,7 +1082,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: s.Current.Cluster.Name, - Version: pointer.String(version), + Version: ptr.To(version), Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef}, InfrastructureRef: *desiredInfraMachinePoolRef, NodeDrainTimeout: nodeDrainTimeout, diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go index e402cd95e4b4..f304450fe192 100644 --- a/internal/controllers/topology/cluster/desired_state_test.go +++ b/internal/controllers/topology/cluster/desired_state_test.go @@ -28,7 +28,7 @@ 
import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -833,7 +833,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ Version: tt.topologyVersion, ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }}, Current: &scope.ClusterState{ @@ -1220,7 +1220,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ Version: "v1.2.3", ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }}, Current: &scope.ClusterState{ @@ -1515,7 +1515,7 @@ func TestComputeMachineDeployment(t *testing.T) { Replicas: &currentReplicas, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String(version), + Version: ptr.To(version), Bootstrap: clusterv1.Bootstrap{ ConfigRef: contract.ObjToRef(workerBootstrapTemplate), }, @@ -1629,7 +1629,7 @@ func TestComputeMachineDeployment(t *testing.T) { name: "use machine deployment's spec.template.spec.version if one of the machine deployments is upgrading, concurrency limit reached", upgradingMachineDeployments: []string{"upgrading-md1"}, upgradeConcurrency: "1", - currentMDVersion: pointer.String("v1.2.2"), + currentMDVersion: ptr.To("v1.2.2"), topologyVersion: "v1.2.3", expectedVersion: "v1.2.2", }, @@ -1637,7 +1637,7 @@ func TestComputeMachineDeployment(t *testing.T) { name: "use cluster.spec.topology.version if one of the machine deployments is upgrading, concurrency limit not reached", upgradingMachineDeployments: []string{"upgrading-md1"}, upgradeConcurrency: "2", - currentMDVersion: pointer.String("v1.2.2"), + currentMDVersion:
ptr.To("v1.2.2"), topologyVersion: "v1.2.3", expectedVersion: "v1.2.3", }, @@ -1656,7 +1656,7 @@ func TestComputeMachineDeployment(t *testing.T) { s.Blueprint = blueprint s.Blueprint.Topology.Version = tt.topologyVersion s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), } s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{} @@ -1689,7 +1689,7 @@ func TestComputeMachineDeployment(t *testing.T) { mdTopology := clusterv1.MachineDeploymentTopology{ Class: "linux-worker", Name: "big-pool-of-machines", - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), } s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...) obj, err := computeMachineDeployment(ctx, s, mdTopology) @@ -1906,7 +1906,7 @@ func TestComputeMachinePool(t *testing.T) { Replicas: &currentReplicas, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String(version), + Version: ptr.To(version), Bootstrap: clusterv1.Bootstrap{ ConfigRef: contract.ObjToRef(workerBootstrapConfig), }, @@ -2015,7 +2015,7 @@ func TestComputeMachinePool(t *testing.T) { name: "use machine pool's spec.template.spec.version if one of the machine pools is upgrading, concurrency limit reached", upgradingMachinePools: []string{"upgrading-mp1"}, upgradeConcurrency: "1", - currentMPVersion: pointer.String("v1.2.2"), + currentMPVersion: ptr.To("v1.2.2"), topologyVersion: "v1.2.3", expectedVersion: "v1.2.2", }, @@ -2023,7 +2023,7 @@ func TestComputeMachinePool(t *testing.T) { name: "use cluster.spec.topology.version if one of the machine pools is upgrading, concurrency limit not reached", upgradingMachinePools: []string{"upgrading-mp1"}, upgradeConcurrency: "2", - currentMPVersion: pointer.String("v1.2.2"), + currentMPVersion: ptr.To("v1.2.2"), topologyVersion: "v1.2.3", expectedVersion: "v1.2.3", }, @@ -2042,7 +2042,7 @@ func TestComputeMachinePool(t *testing.T) { s.Blueprint = blueprint
s.Blueprint.Topology.Version = tt.topologyVersion s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), } s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{} @@ -2073,7 +2073,7 @@ func TestComputeMachinePool(t *testing.T) { mpTopology := clusterv1.MachinePoolTopology{ Class: "linux-worker", Name: "big-pool-of-machines", - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), } s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) obj, err := computeMachinePool(ctx, s, mpTopology) @@ -2218,7 +2218,7 @@ func TestComputeMachineDeploymentVersion(t *testing.T) { Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ Version: tt.topologyVersion, ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Workers: &clusterv1.WorkersTopology{}, }}, @@ -2396,7 +2396,7 @@ func TestComputeMachinePoolVersion(t *testing.T) { Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ Version: tt.topologyVersion, ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, Workers: &clusterv1.WorkersTopology{}, }}, diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go index b7093aae8b49..23b2843afafc 100644 --- a/internal/controllers/topology/cluster/patches/engine_test.go +++ b/internal/controllers/topology/cluster/patches/engine_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" . 
"sigs.k8s.io/controller-runtime/pkg/envtest/komega" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -406,8 +406,8 @@ func TestApply(t *testing.T) { { Name: "fake-patch1", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("patch-infrastructureCluster"), - ValidateExtension: pointer.String("validate-infrastructureCluster"), + GenerateExtension: ptr.To("patch-infrastructureCluster"), + ValidateExtension: ptr.To("validate-infrastructureCluster"), }, }, }, @@ -442,8 +442,8 @@ func TestApply(t *testing.T) { { Name: "fake-patch1", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("patch-infrastructureCluster"), - ValidateExtension: pointer.String("validate-infrastructureCluster"), + GenerateExtension: ptr.To("patch-infrastructureCluster"), + ValidateExtension: ptr.To("validate-infrastructureCluster"), }, }, }, @@ -475,13 +475,13 @@ func TestApply(t *testing.T) { { Name: "fake-patch1", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("patch-infrastructureCluster"), + GenerateExtension: ptr.To("patch-infrastructureCluster"), }, }, { Name: "fake-patch2", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("patch-controlPlane"), + GenerateExtension: ptr.To("patch-controlPlane"), }, }, }, @@ -545,7 +545,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/clusterName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.name"), + Variable: ptr.To("builtin.cluster.name"), }, }, }, @@ -563,7 +563,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/controlPlaneName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.controlPlane.name"), + Variable: ptr.To("builtin.controlPlane.name"), }, }, }, @@ -581,7 +581,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/controlPlaneName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: 
pointer.String("builtin.controlPlane.name"), + Variable: ptr.To("builtin.controlPlane.name"), }, }, }, @@ -601,7 +601,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machineDeploymentTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.machineDeployment.topologyName"), + Variable: ptr.To("builtin.machineDeployment.topologyName"), }, }, }, @@ -621,7 +621,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machineDeploymentTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.machineDeployment.topologyName"), + Variable: ptr.To("builtin.machineDeployment.topologyName"), }, }, }, @@ -641,7 +641,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machinePoolTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.machinePool.topologyName"), + Variable: ptr.To("builtin.machinePool.topologyName"), }, }, }, @@ -661,7 +661,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/machinePoolTopologyName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.machinePool.topologyName"), + Variable: ptr.To("builtin.machinePool.topologyName"), }, }, }, @@ -746,7 +746,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("infraCluster"), + Variable: ptr.To("infraCluster"), }, }, }, @@ -766,7 +766,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("default-worker-infra"), + Variable: ptr.To("default-worker-infra"), }, }, }, @@ -786,7 +786,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("default-worker-infra"), + Variable: ptr.To("default-worker-infra"), }, }, }, @@ -806,7 +806,7 @@ 
func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("default-mp-worker-infra"), + Variable: ptr.To("default-mp-worker-infra"), }, }, }, @@ -826,7 +826,7 @@ func TestApply(t *testing.T) { Op: "add", Path: "/spec/template/spec/resource", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("default-mp-worker-infra"), + Variable: ptr.To("default-mp-worker-infra"), }, }, }, @@ -1027,7 +1027,7 @@ func setupTestObjects() (*scope.ClusterBlueprint, *scope.ClusterState) { Spec: clusterv1.ClusterSpec{ Paused: false, ClusterNetwork: &clusterv1.ClusterNetwork{ - APIServerPort: pointer.Int32(8), + APIServerPort: ptr.To[int32](8), Services: &clusterv1.NetworkRanges{ CIDRBlocks: []string{"10.10.10.1/24"}, }, @@ -1042,7 +1042,7 @@ func setupTestObjects() (*scope.ClusterBlueprint, *scope.ClusterState) { Version: "v1.21.2", Class: clusterClass.Name, ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), }, Variables: []clusterv1.ClusterVariable{ { @@ -1089,7 +1089,7 @@ func setupTestObjects() (*scope.ClusterBlueprint, *scope.ClusterState) { Metadata: clusterv1.ObjectMeta{}, Class: "default-worker", Name: "default-worker-topo2", - Replicas: pointer.Int32(5), + Replicas: ptr.To[int32](5), }, }, MachinePools: []clusterv1.MachinePoolTopology{ @@ -1111,7 +1111,7 @@ func setupTestObjects() (*scope.ClusterBlueprint, *scope.ClusterState) { Metadata: clusterv1.ObjectMeta{}, Class: "default-mp-worker", Name: "default-mp-worker-topo2", - Replicas: pointer.Int32(5), + Replicas: ptr.To[int32](5), }, }, }, diff --git a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go index 10ecee68ca93..74e890eb34be 100644 --- a/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go +++ 
b/internal/controllers/topology/cluster/patches/external/external_patch_generator_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" @@ -53,7 +53,7 @@ func TestExternalPatchGenerator_Generate(t *testing.T) { EnabledIf: nil, Definitions: nil, External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("test-generate-extension"), + GenerateExtension: ptr.To("test-generate-extension"), Settings: nil, }, }, @@ -72,7 +72,7 @@ func TestExternalPatchGenerator_Generate(t *testing.T) { EnabledIf: nil, Definitions: nil, External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("test-generate-extension"), + GenerateExtension: ptr.To("test-generate-extension"), Settings: map[string]string{ "key1": "value1", }, diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go index 0af03273a98a..39e8b5e6dfa9 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" @@ -66,7 +66,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/valueFrom/variable", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableA"), + Variable: ptr.To("variableA"), }, }, // .valueFrom.template using sprig 
functions @@ -74,7 +74,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/valueFrom/template", ValueFrom: &clusterv1.JSONPatchValue{ - Template: pointer.String(`template {{ .variableB | lower | repeat 5 }}`), + Template: ptr.To(`template {{ .variableB | lower | repeat 5 }}`), }, }, // template-specific variable takes precedent, if the same variable exists @@ -83,7 +83,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/templatePrecedent", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableC"), + Variable: ptr.To("variableC"), }, }, // global builtin variable should work. @@ -92,7 +92,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/builtinClusterName", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.name"), + Variable: ptr.To("builtin.cluster.name"), }, }, // template-specific builtin variable should work. @@ -101,7 +101,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/builtinControlPlaneReplicas", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.controlPlane.replicas"), + Variable: ptr.To("builtin.controlPlane.replicas"), }, }, // test .builtin.controlPlane.machineTemplate.InfrastructureRef.name var. 
@@ -109,7 +109,7 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: pointer.String(`[{"contentFrom":{"secret":{"key":"control-plane-azure.json","name":"{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json"}}}]`), + Template: ptr.To(`[{"contentFrom":{"secret":{"key":"control-plane-azure.json","name":"{{ .builtin.controlPlane.machineTemplate.infrastructureRef.name }}-azure-json"}}}]`), }, }, }, @@ -208,14 +208,14 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.name"), + Variable: ptr.To("builtin.cluster.name"), }, }, { Op: "replace", Path: "/spec/template/spec/kubeadmConfigSpec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: pointer.String(` + Template: ptr.To(` - contentFrom: secret: key: control-plane-azure.json @@ -245,14 +245,14 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.name"), + Variable: ptr.To("builtin.cluster.name"), }, }, { Op: "replace", Path: "/spec/template/spec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: pointer.String(` + Template: ptr.To(` [{ "contentFrom":{ "secret":{ @@ -281,14 +281,14 @@ func TestGenerate(t *testing.T) { Op: "replace", Path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.name"), + Variable: ptr.To("builtin.cluster.name"), }, }, { Op: "replace", Path: "/spec/template/spec/files", ValueFrom: &clusterv1.JSONPatchValue{ - Template: pointer.String(` + Template: ptr.To(` [{ "contentFrom":{ "secret":{ @@ -1295,31 
+1295,31 @@ func TestPatchIsEnabled(t *testing.T) { }, { name: "Fail if template is invalid", - enabledIf: pointer.String(`{{ variable }}`), // . is missing + enabledIf: ptr.To(`{{ variable }}`), // . is missing wantErr: true, }, // Hardcoded value. { name: "Enabled if template is true ", - enabledIf: pointer.String(`true`), + enabledIf: ptr.To(`true`), want: true, }, { name: "Enabled if template is true (even with leading and trailing new line)", - enabledIf: pointer.String(` + enabledIf: ptr.To(` true `), want: true, }, { name: "Disabled if template is false", - enabledIf: pointer.String(`false`), + enabledIf: ptr.To(`false`), want: false, }, // Boolean variable. { name: "Enabled if simple template with boolean variable evaluates to true", - enabledIf: pointer.String(`{{ .httpProxyEnabled }}`), + enabledIf: ptr.To(`{{ .httpProxyEnabled }}`), variables: map[string]apiextensionsv1.JSON{ "httpProxyEnabled": {Raw: []byte(`true`)}, }, @@ -1327,7 +1327,7 @@ true }, { name: "Enabled if simple template with boolean variable evaluates to true (even with leading and trailing new line", - enabledIf: pointer.String(` + enabledIf: ptr.To(` {{ .httpProxyEnabled }} `), variables: map[string]apiextensionsv1.JSON{ @@ -1337,7 +1337,7 @@ true }, { name: "Disabled if simple template with boolean variable evaluates to false", - enabledIf: pointer.String(`{{ .httpProxyEnabled }}`), + enabledIf: ptr.To(`{{ .httpProxyEnabled }}`), variables: map[string]apiextensionsv1.JSON{ "httpProxyEnabled": {Raw: []byte(`false`)}, }, @@ -1347,7 +1347,7 @@ true { name: "Enabled if template with if evaluates to true", // Else is not needed because we check if the result is equal to true. 
- enabledIf: pointer.String(`{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{end}}`), + enabledIf: ptr.To(`{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1355,7 +1355,7 @@ true }, { name: "Disabled if template with if evaluates to false", - enabledIf: pointer.String(`{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{end}}`), + enabledIf: ptr.To(`{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1363,7 +1363,7 @@ true }, { name: "Enabled if template with if/else evaluates to true", - enabledIf: pointer.String(`{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{else}}false{{end}}`), + enabledIf: ptr.To(`{{ if eq "v1.21.1" .builtin.cluster.topology.version }}true{{else}}false{{end}}`), variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1371,7 +1371,7 @@ true }, { name: "Disabled if template with if/else evaluates to false", - enabledIf: pointer.String(`{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{else}}false{{end}}`), + enabledIf: ptr.To(`{{ if eq "v1.21.2" .builtin.cluster.topology.version }}true{{else}}false{{end}}`), variables: map[string]apiextensionsv1.JSON{ "builtin": {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1380,7 +1380,7 @@ true // Render value with if to check if var is not empty. 
{ name: "Enabled if template which checks if variable is set evaluates to true", - enabledIf: pointer.String(`{{ if .variableA }}true{{end}}`), + enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "variableA": {Raw: []byte(`"abc"`)}, }, @@ -1388,7 +1388,7 @@ true }, { name: "Disabled if template which checks if variable is set evaluates to false (variable empty)", - enabledIf: pointer.String(`{{ if .variableA }}true{{end}}`), + enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "variableA": {Raw: []byte(``)}, }, @@ -1396,7 +1396,7 @@ true }, { name: "Disabled if template which checks if variable is set evaluates to false (variable empty string)", - enabledIf: pointer.String(`{{ if .variableA }}true{{end}}`), + enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "variableA": {Raw: []byte(`""`)}, }, @@ -1404,7 +1404,7 @@ true }, { name: "Disabled if template which checks if variable is set evaluates to false (variable does not exist)", - enabledIf: pointer.String(`{{ if .variableA }}true{{end}}`), + enabledIf: ptr.To(`{{ if .variableA }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "variableB": {Raw: []byte(``)}, }, @@ -1415,7 +1415,7 @@ true // test mostly exists to visualize how user-defined object variables can be used. 
{ name: "Enabled if template with complex variable evaluates to true", - enabledIf: pointer.String(`{{ if .httpProxy.enabled }}true{{end}}`), + enabledIf: ptr.To(`{{ if .httpProxy.enabled }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "httpProxy": {Raw: []byte(`{"enabled": true, "url": "localhost:3128", "noProxy": "internal.example.com"}`)}, }, @@ -1423,7 +1423,7 @@ true }, { name: "Disabled if template with complex variable evaluates to false", - enabledIf: pointer.String(`{{ if .httpProxy.enabled }}true{{end}}`), + enabledIf: ptr.To(`{{ if .httpProxy.enabled }}true{{end}}`), variables: map[string]apiextensionsv1.JSON{ "httpProxy": {Raw: []byte(`{"enabled": false, "url": "localhost:3128", "noProxy": "internal.example.com"}`)}, }, @@ -1464,7 +1464,7 @@ func TestCalculateValue(t *testing.T) { patch: clusterv1.JSONPatch{ Value: &apiextensionsv1.JSON{Raw: []byte(`"value"`)}, ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableA"), + Variable: ptr.To("variableA"), }, }, wantErr: true, @@ -1473,8 +1473,8 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable and .valueFrom.template are set", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableA"), - Template: pointer.String("template"), + Variable: ptr.To("variableA"), + Template: ptr.To("template"), }, }, wantErr: true, @@ -1497,7 +1497,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableA"), + Variable: ptr.To("variableA"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1509,7 +1509,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable is set but variable does not exist", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableA"), + Variable: ptr.To("variableA"), }, }, variables: 
map[string]apiextensionsv1.JSON{ @@ -1521,7 +1521,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: builtinVariable int", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.controlPlane.replicas"), + Variable: ptr.To("builtin.controlPlane.replicas"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1533,7 +1533,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: builtinVariable string", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.topology.version"), + Variable: ptr.To("builtin.cluster.topology.version"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1545,7 +1545,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: variable 'builtin'", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin"), + Variable: ptr.To("builtin"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1557,7 +1557,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: variable 'builtin.cluster'", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster"), + Variable: ptr.To("builtin.cluster"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1569,7 +1569,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: variable 'builtin.cluster.topology'", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.cluster.topology"), + Variable: ptr.To("builtin.cluster.topology"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1582,7 +1582,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return rendered .valueFrom.template if set", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Template: 
pointer.String("{{ .variableA }}"), + Template: ptr.To("{{ .variableA }}"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1595,7 +1595,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: whole object", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject"), + Variable: ptr.To("variableObject"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1607,7 +1607,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested bool property", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.boolProperty"), + Variable: ptr.To("variableObject.boolProperty"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1619,7 +1619,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested integer property", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.integerProperty"), + Variable: ptr.To("variableObject.integerProperty"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1631,7 +1631,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested string property", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.enumProperty"), + Variable: ptr.To("variableObject.enumProperty"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1643,7 +1643,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable object variable does not exist", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.enumProperty"), + Variable: ptr.To("variableObject.enumProperty"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1655,7 +1655,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable 
nested object property does not exist", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.nonExistingProperty"), + Variable: ptr.To("variableObject.nonExistingProperty"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1668,7 +1668,7 @@ func TestCalculateValue(t *testing.T) { patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ // NOTE: it's not possible to access a property of an array element without index. - Variable: pointer.String("variableObject.nonExistingProperty"), + Variable: ptr.To("variableObject.nonExistingProperty"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1681,7 +1681,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property top-level", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject"), + Variable: ptr.To("variableObject"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1693,7 +1693,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property firstLevel", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.firstLevel"), + Variable: ptr.To("variableObject.firstLevel"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1705,7 +1705,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property secondLevel", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableObject.firstLevel.secondLevel"), + Variable: ptr.To("variableObject.firstLevel.secondLevel"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1717,7 +1717,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested object property leaf", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: 
pointer.String("variableObject.firstLevel.secondLevel.leaf"), + Variable: ptr.To("variableObject.firstLevel.secondLevel.leaf"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1730,7 +1730,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: array", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray"), + Variable: ptr.To("variableArray"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1742,7 +1742,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: array element", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray[0]"), + Variable: ptr.To("variableArray[0]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1754,7 +1754,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested array", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel"), + Variable: ptr.To("variableArray.firstLevel"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1766,7 +1766,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested array element", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel[1]"), + Variable: ptr.To("variableArray.firstLevel[1]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1778,7 +1778,7 @@ func TestCalculateValue(t *testing.T) { name: "Should return .valueFrom.variable if set: nested field of nested array element", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel[1].secondLevel"), + Variable: ptr.To("variableArray.firstLevel[1].secondLevel"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1790,7 +1790,7 @@ func TestCalculateValue(t *testing.T) { name: 
"Fails if .valueFrom.variable array path is invalid: only left delimiter", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel["), + Variable: ptr.To("variableArray.firstLevel["), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1802,7 +1802,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: only right delimiter", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel]"), + Variable: ptr.To("variableArray.firstLevel]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1814,7 +1814,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: no index", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel[]"), + Variable: ptr.To("variableArray.firstLevel[]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1826,7 +1826,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: text index", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel[someText]"), + Variable: ptr.To("variableArray.firstLevel[someText]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1838,7 +1838,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: negative index", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel[-1]"), + Variable: ptr.To("variableArray.firstLevel[-1]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1850,7 +1850,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: index out of bounds", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: 
pointer.String("variableArray.firstLevel[1]"), + Variable: ptr.To("variableArray.firstLevel[1]"), }, }, variables: map[string]apiextensionsv1.JSON{ @@ -1862,7 +1862,7 @@ func TestCalculateValue(t *testing.T) { name: "Fails if .valueFrom.variable array path is invalid: variable is an object instead", patch: clusterv1.JSONPatch{ ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableArray.firstLevel[1]"), + Variable: ptr.To("variableArray.firstLevel[1]"), }, }, variables: map[string]apiextensionsv1.JSON{ diff --git a/internal/controllers/topology/cluster/patches/variables/value.go b/internal/controllers/topology/cluster/patches/variables/value.go index 83319688deeb..bff3efa0aaf5 100644 --- a/internal/controllers/topology/cluster/patches/variables/value.go +++ b/internal/controllers/topology/cluster/patches/variables/value.go @@ -24,7 +24,7 @@ import ( "github.com/pkg/errors" "github.com/valyala/fastjson" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -144,7 +144,7 @@ func parsePathSegment(segment string) (*pathSegment, error) { return &pathSegment{ path: segment[:strings.Index(segment, leftArrayDelim)], //nolint:gocritic // We already check above that segment contains leftArrayDelim, - index: pointer.Int(index), + index: ptr.To(index), }, nil } diff --git a/internal/controllers/topology/cluster/patches/variables/value_test.go b/internal/controllers/topology/cluster/patches/variables/value_test.go index d1abb2004273..b4afd5a019cf 100644 --- a/internal/controllers/topology/cluster/patches/variables/value_test.go +++ b/internal/controllers/topology/cluster/patches/variables/value_test.go @@ -20,7 +20,7 @@ import ( "testing" . 
"github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) func TestParsePathSegment(t *testing.T) { @@ -43,7 +43,7 @@ func TestParsePathSegment(t *testing.T) { segment: "arrayProperty[5]", wantPathSegment: &pathSegment{ path: "arrayProperty", - index: pointer.Int(5), + index: ptr.To(5), }, }, { diff --git a/internal/controllers/topology/cluster/patches/variables/variables.go b/internal/controllers/topology/cluster/patches/variables/variables.go index 6fc7ff8f1457..dd7ac0b7286c 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables.go +++ b/internal/controllers/topology/cluster/patches/variables/variables.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -336,7 +336,7 @@ func MachineDeployment(mdTopology *clusterv1.MachineDeploymentTopology, md *clus }, } if md.Spec.Replicas != nil { - builtin.MachineDeployment.Replicas = pointer.Int64(int64(*md.Spec.Replicas)) + builtin.MachineDeployment.Replicas = ptr.To[int64](int64(*md.Spec.Replicas)) } if mdBootstrapTemplate != nil { @@ -389,7 +389,7 @@ func MachinePool(mpTopology *clusterv1.MachinePoolTopology, mp *expv1.MachinePoo }, } if mp.Spec.Replicas != nil { - builtin.MachinePool.Replicas = pointer.Int64(int64(*mp.Spec.Replicas)) + builtin.MachinePool.Replicas = ptr.To[int64](int64(*mp.Spec.Replicas)) } if mpBootstrapObject != nil { diff --git a/internal/controllers/topology/cluster/patches/variables/variables_test.go b/internal/controllers/topology/cluster/patches/variables/variables_test.go index 775ef609e544..7b6341deb80e 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables_test.go +++ b/internal/controllers/topology/cluster/patches/variables/variables_test.go @@ -25,7 +25,7 @@ 
import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -412,7 +412,7 @@ func TestControlPlane(t *testing.T) { { name: "Should calculate ControlPlane variables", controlPlaneTopology: &clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), }, controlPlane: builder.ControlPlane(metav1.NamespaceDefault, "controlPlane1"). WithReplicas(3). @@ -450,7 +450,7 @@ func TestControlPlane(t *testing.T) { { name: "Should calculate ControlPlane variables with InfrastructureMachineTemplate", controlPlaneTopology: &clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), }, controlPlane: builder.ControlPlane(metav1.NamespaceDefault, "controlPlane1"). WithReplicas(3). 
@@ -503,7 +503,7 @@ func TestMachineDeployment(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mdTopology: &clusterv1.MachineDeploymentTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "md-topology", Class: "md-class", Variables: &clusterv1.MachineDeploymentVariables{ @@ -553,7 +553,7 @@ func TestMachineDeployment(t *testing.T) { "cpu": true, }, mdTopology: &clusterv1.MachineDeploymentTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "md-topology", Class: "md-class", Variables: &clusterv1.MachineDeploymentVariables{ @@ -615,7 +615,7 @@ func TestMachineDeployment(t *testing.T) { forPatch: "patch1", variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, mdTopology: &clusterv1.MachineDeploymentTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "md-topology", Class: "md-class", }, @@ -686,7 +686,7 @@ func TestMachineDeployment(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mdTopology: &clusterv1.MachineDeploymentTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "md-topology", Class: "md-class", Variables: &clusterv1.MachineDeploymentVariables{ @@ -739,7 +739,7 @@ func TestMachineDeployment(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mdTopology: &clusterv1.MachineDeploymentTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "md-topology", Class: "md-class", Variables: &clusterv1.MachineDeploymentVariables{ @@ -790,7 +790,7 @@ func TestMachineDeployment(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mdTopology: &clusterv1.MachineDeploymentTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "md-topology", Class: "md-class", Variables: 
&clusterv1.MachineDeploymentVariables{ @@ -870,7 +870,7 @@ func TestMachinePool(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mpTopology: &clusterv1.MachinePoolTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "mp-topology", Class: "mp-class", Variables: &clusterv1.MachinePoolVariables{ @@ -920,7 +920,7 @@ func TestMachinePool(t *testing.T) { "cpu": true, }, mpTopology: &clusterv1.MachinePoolTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "mp-topology", Class: "mp-class", Variables: &clusterv1.MachinePoolVariables{ @@ -982,7 +982,7 @@ func TestMachinePool(t *testing.T) { forPatch: "patch1", variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, mpTopology: &clusterv1.MachinePoolTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "mp-topology", Class: "mp-class", }, @@ -1053,7 +1053,7 @@ func TestMachinePool(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mpTopology: &clusterv1.MachinePoolTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "mp-topology", Class: "mp-class", Variables: &clusterv1.MachinePoolVariables{ @@ -1106,7 +1106,7 @@ func TestMachinePool(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mpTopology: &clusterv1.MachinePoolTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "mp-topology", Class: "mp-class", Variables: &clusterv1.MachinePoolVariables{ @@ -1157,7 +1157,7 @@ func TestMachinePool(t *testing.T) { variableDefinitionsForPatch: map[string]bool{"location": true, "cpu": true}, forPatch: "patch1", mpTopology: &clusterv1.MachinePoolTopology{ - Replicas: pointer.Int32(3), + Replicas: ptr.To[int32](3), Name: "mp-topology", Class: "mp-class", Variables: &clusterv1.MachinePoolVariables{ diff --git 
a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index cdbda35e6315..80e33b5f9f59 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" . "sigs.k8s.io/controller-runtime/pkg/envtest/komega" @@ -515,7 +515,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -545,7 +545,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -582,7 +582,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -619,7 +619,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -656,7 +656,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -693,7 +693,7 @@ func 
TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -731,7 +731,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -769,7 +769,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -806,7 +806,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -843,7 +843,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -880,7 +880,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -917,7 +917,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -955,7 +955,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - 
Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -993,7 +993,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, @@ -1030,7 +1030,7 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { Blueprint: &scope.ClusterBlueprint{ Topology: &clusterv1.Topology{ ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + Replicas: ptr.To[int32](2), }, }, }, diff --git a/internal/controllers/topology/cluster/scope/blueprint_test.go b/internal/controllers/topology/cluster/scope/blueprint_test.go index 712563ed43cb..66071316d996 100644 --- a/internal/controllers/topology/cluster/scope/blueprint_test.go +++ b/internal/controllers/topology/cluster/scope/blueprint_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/test/builder" @@ -80,7 +80,7 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { Topology: builder.ClusterTopology(). WithClass("cluster-class"). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(false), + Enable: ptr.To(false), }). Build(), }, @@ -96,7 +96,7 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { Topology: builder.ClusterTopology(). WithClass("cluster-class"). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build(), }, @@ -134,7 +134,7 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { Topology: builder.ClusterTopology(). WithClass("cluster-class"). 
WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(false), + Enable: ptr.To(false), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { @@ -158,7 +158,7 @@ func TestIsControlPlaneMachineHealthCheckEnabled(t *testing.T) { Topology: builder.ClusterTopology(). WithClass("cluster-class"). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { @@ -292,7 +292,7 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { mdTopology: &clusterv1.MachineDeploymentTopology{ Class: "worker-class", MachineHealthCheck: &clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(false), + Enable: ptr.To(false), }, }, want: false, @@ -309,7 +309,7 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { mdTopology: &clusterv1.MachineDeploymentTopology{ Class: "worker-class", MachineHealthCheck: &clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }, }, want: true, @@ -347,7 +347,7 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { mdTopology: &clusterv1.MachineDeploymentTopology{ Class: "worker-class", MachineHealthCheck: &clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(false), + Enable: ptr.To(false), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { @@ -371,7 +371,7 @@ func TestIsMachineDeploymentMachineHealthCheckEnabled(t *testing.T) { mdTopology: &clusterv1.MachineDeploymentTopology{ Class: "worker-class", MachineHealthCheck: &clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: 
[]clusterv1.UnhealthyCondition{ { diff --git a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go index c0d5e85a96c8..1a234f49d59e 100644 --- a/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go +++ b/internal/controllers/topology/cluster/structuredmerge/serversidepathhelper_test.go @@ -34,7 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -816,7 +816,7 @@ func setupWebhookWithManager(ns *corev1.Namespace) (*KubeadmConfigTemplateTestDe { Name: ns.Name + ".kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io", ClientConfig: admissionv1.WebhookClientConfig{ - URL: pointer.String(fmt.Sprintf("https://%s%s", net.JoinHostPort(webhookHost, strconv.Itoa(webhookServer.Options.Port)), webhookPath)), + URL: ptr.To(fmt.Sprintf("https://%s%s", net.JoinHostPort(webhookHost, strconv.Itoa(webhookServer.Options.Port)), webhookPath)), CABundle: caBundle, }, Rules: []admissionv1.RuleWithOperations{ diff --git a/internal/runtime/client/client.go b/internal/runtime/client/client.go index 18c0eece619b..1d9f2b3dc63e 100644 --- a/internal/runtime/client/client.go +++ b/internal/runtime/client/client.go @@ -40,7 +40,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/transport" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -631,7 +631,7 @@ func defaultDiscoveryResponse(discovery *runtimehooksv1.DiscoveryResponse) *runt // If TimeoutSeconds is not defined set to 10. 
if handler.TimeoutSeconds == nil { - handler.TimeoutSeconds = pointer.Int32(runtimehooksv1.DefaultHandlersTimeoutSeconds) + handler.TimeoutSeconds = ptr.To[int32](runtimehooksv1.DefaultHandlersTimeoutSeconds) } discovery.Handlers[i] = handler diff --git a/internal/runtime/client/client_test.go b/internal/runtime/client/client_test.go index 347780d935e4..4a9ceb165274 100644 --- a/internal/runtime/client/client_test.go +++ b/internal/runtime/client/client_test.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -194,7 +194,7 @@ func TestClient_httpCall(t *testing.T) { defer srv.Close() // set url to srv for in tt.opts - tt.opts.config.URL = pointer.String(srv.URL) + tt.opts.config.URL = ptr.To(srv.URL) tt.opts.config.CABundle = testcerts.CACert } @@ -257,7 +257,7 @@ func TestURLForExtension(t *testing.T) { Service: &runtimev1.ServiceReference{ Namespace: "test1", Name: "extension-service", - Port: pointer.Int32(8443), + Port: ptr.To[int32](8443), }, }, gvh: gvh, @@ -277,7 +277,7 @@ func TestURLForExtension(t *testing.T) { Service: &runtimev1.ServiceReference{ Namespace: "test1", Name: "extension-service", - Port: pointer.Int32(8443), + Port: ptr.To[int32](8443), }, CABundle: []byte("some-ca-data"), }, @@ -295,7 +295,7 @@ func TestURLForExtension(t *testing.T) { name: "ClientConfig using URL should have correct URL values", args: args{ config: runtimev1.ClientConfig{ - URL: pointer.String("https://extension-host.com"), + URL: ptr.To("https://extension-host.com"), }, gvh: gvh, extensionHandlerName: "test-handler", @@ -408,7 +408,7 @@ func Test_defaultAndValidateDiscoveryResponse(t *testing.T) { Hook: "FakeHook", APIVersion: fakev1alpha1.GroupVersion.String(), }, - TimeoutSeconds: pointer.Int32(100), + TimeoutSeconds: 
ptr.To[int32](100), }}, }, wantErr: true, @@ -426,7 +426,7 @@ func Test_defaultAndValidateDiscoveryResponse(t *testing.T) { Hook: "FakeHook", APIVersion: fakev1alpha1.GroupVersion.String(), }, - TimeoutSeconds: pointer.Int32(-1), + TimeoutSeconds: ptr.To[int32](-1), }}, }, wantErr: true, @@ -444,7 +444,7 @@ func Test_defaultAndValidateDiscoveryResponse(t *testing.T) { Hook: "FakeHook", APIVersion: fakev1alpha1.GroupVersion.String(), }, - TimeoutSeconds: pointer.Int32(20), + TimeoutSeconds: ptr.To[int32](20), FailurePolicy: &invalidFailurePolicy, }}, }, @@ -546,7 +546,7 @@ func TestClient_CallExtension(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ // Set a fake URL, in test cases where we start the test server the URL will be overridden. - URL: pointer.String("https://127.0.0.1/"), + URL: ptr.To("https://127.0.0.1/"), CABundle: testcerts.CACert, }, NamespaceSelector: &metav1.LabelSelector{}, @@ -559,7 +559,7 @@ func TestClient_CallExtension(t *testing.T) { APIVersion: fakev1alpha1.GroupVersion.String(), Hook: "FakeHook", }, - TimeoutSeconds: pointer.Int32(1), + TimeoutSeconds: ptr.To[int32](1), FailurePolicy: &fpFail, }, }, @@ -569,7 +569,7 @@ func TestClient_CallExtension(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ // Set a fake URL, in test cases where we start the test server the URL will be overridden. - URL: pointer.String("https://127.0.0.1/"), + URL: ptr.To("https://127.0.0.1/"), CABundle: testcerts.CACert, }, NamespaceSelector: &metav1.LabelSelector{}}, @@ -581,7 +581,7 @@ func TestClient_CallExtension(t *testing.T) { APIVersion: fakev1alpha1.GroupVersion.String(), Hook: "FakeHook", }, - TimeoutSeconds: pointer.Int32(1), + TimeoutSeconds: ptr.To[int32](1), FailurePolicy: &fpIgnore, }, }, @@ -755,7 +755,7 @@ func TestClient_CallExtension(t *testing.T) { // Set the URL to the real address of the test server. 
for i := range tt.registeredExtensionConfigs { - tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = pointer.String(fmt.Sprintf("https://%s/", srv.Listener.Addr().String())) + tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = ptr.To(fmt.Sprintf("https://%s/", srv.Listener.Addr().String())) } } @@ -884,7 +884,7 @@ func TestClient_CallAllExtensions(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ // Set a fake URL, in test cases where we start the test server the URL will be overridden. - URL: pointer.String("https://127.0.0.1/"), + URL: ptr.To("https://127.0.0.1/"), CABundle: testcerts.CACert, }, NamespaceSelector: &metav1.LabelSelector{}, @@ -897,7 +897,7 @@ func TestClient_CallAllExtensions(t *testing.T) { APIVersion: fakev1alpha1.GroupVersion.String(), Hook: "FakeHook", }, - TimeoutSeconds: pointer.Int32(1), + TimeoutSeconds: ptr.To[int32](1), FailurePolicy: &fpFail, }, { @@ -906,7 +906,7 @@ func TestClient_CallAllExtensions(t *testing.T) { APIVersion: fakev1alpha1.GroupVersion.String(), Hook: "FakeHook", }, - TimeoutSeconds: pointer.Int32(1), + TimeoutSeconds: ptr.To[int32](1), FailurePolicy: &fpFail, }, { @@ -915,7 +915,7 @@ func TestClient_CallAllExtensions(t *testing.T) { APIVersion: fakev1alpha1.GroupVersion.String(), Hook: "FakeHook", }, - TimeoutSeconds: pointer.Int32(1), + TimeoutSeconds: ptr.To[int32](1), FailurePolicy: &fpFail, }, }, @@ -1040,7 +1040,7 @@ func TestClient_CallAllExtensions(t *testing.T) { // Set the URL to the real address of the test server. 
for i := range tt.registeredExtensionConfigs { - tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = pointer.String(fmt.Sprintf("https://%s/", srv.Listener.Addr().String())) + tt.registeredExtensionConfigs[i].Spec.ClientConfig.URL = ptr.To(fmt.Sprintf("https://%s/", srv.Listener.Addr().String())) } } diff --git a/internal/runtime/registry/registry_test.go b/internal/runtime/registry/registry_test.go index c756957610fe..bc9de5140288 100644 --- a/internal/runtime/registry/registry_test.go +++ b/internal/runtime/registry/registry_test.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" @@ -101,7 +101,7 @@ func TestRegistry(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: pointer.String("https://extesions1.com/"), + URL: ptr.To("https://extesions1.com/"), }, }, Status: runtimev1.ExtensionConfigStatus{ @@ -137,7 +137,7 @@ func TestRegistry(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: pointer.String("https://extesions2.com/"), + URL: ptr.To("https://extesions2.com/"), }, }, Status: runtimev1.ExtensionConfigStatus{ diff --git a/internal/test/builder/crds.go b/internal/test/builder/crds.go index 79553dca085b..67caa0e297e6 100644 --- a/internal/test/builder/crds.go +++ b/internal/test/builder/crds.go @@ -23,7 +23,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/contract" @@ -33,11 +33,11 @@ func untypedCRD(gvk schema.GroupVersionKind) *apiextensionsv1.CustomResourceDefi 
return generateCRD(gvk, map[string]apiextensionsv1.JSONSchemaProps{ "spec": { Type: "object", - XPreserveUnknownFields: pointer.Bool(true), + XPreserveUnknownFields: ptr.To(true), }, "status": { Type: "object", - XPreserveUnknownFields: pointer.Bool(true), + XPreserveUnknownFields: ptr.To(true), }, }) } diff --git a/internal/topology/variables/cluster_variable_validation_test.go b/internal/topology/variables/cluster_variable_validation_test.go index c9abc2a24016..8c91011ad434 100644 --- a/internal/topology/variables/cluster_variable_validation_test.go +++ b/internal/topology/variables/cluster_variable_validation_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/gomega" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -47,7 +47,7 @@ func Test_ValidateClusterVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -62,7 +62,7 @@ func Test_ValidateClusterVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -122,7 +122,7 @@ func Test_ValidateClusterVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -137,7 +137,7 @@ func Test_ValidateClusterVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -167,7 +167,7 @@ func Test_ValidateClusterVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: 
"integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -182,7 +182,7 @@ func Test_ValidateClusterVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -656,7 +656,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -676,7 +676,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Maximum: pointer.Int64(10), + Maximum: ptr.To[int64](10), }, }, }, @@ -696,7 +696,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -717,7 +717,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -736,7 +736,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -995,7 +995,7 @@ func Test_ValidateClusterVariable(t *testing.T) { }, "integerProperty": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, "enumProperty": { Type: "string", @@ -1197,7 +1197,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Items: &clusterv1.JSONSchemaProps{ Type: "string", }, - MaxItems: pointer.Int64(3), + MaxItems: ptr.To[int64](3), }, }, }, @@ -1220,7 +1220,7 @@ func Test_ValidateClusterVariable(t *testing.T) { Items: &clusterv1.JSONSchemaProps{ Type: 
"string", }, - MinItems: pointer.Int64(3), + MinItems: ptr.To[int64](3), }, }, }, diff --git a/internal/topology/variables/clusterclass_variable_validation_test.go b/internal/topology/variables/clusterclass_variable_validation_test.go index 75b299dc7f11..040005389899 100644 --- a/internal/topology/variables/clusterclass_variable_validation_test.go +++ b/internal/topology/variables/clusterclass_variable_validation_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/gomega" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -42,7 +42,7 @@ func Test_ValidateClusterClassVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -51,7 +51,7 @@ func Test_ValidateClusterClassVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -66,7 +66,7 @@ func Test_ValidateClusterClassVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -75,7 +75,7 @@ func Test_ValidateClusterClassVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "number", - Maximum: pointer.Int64(1), + Maximum: ptr.To[int64](1), }, }, }, @@ -85,7 +85,7 @@ func Test_ValidateClusterClassVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -94,7 +94,7 @@ func Test_ValidateClusterClassVariables(t *testing.T) { Schema: clusterv1.VariableSchema{ 
OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -132,7 +132,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -144,7 +144,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -156,7 +156,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -168,7 +168,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -181,7 +181,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -194,7 +194,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MinLength: pointer.Int64(1), + MinLength: ptr.To[int64](1), }, }, }, @@ -514,7 +514,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MaxLength: pointer.Int64(6), + MaxLength: ptr.To[int64](6), Default: &apiextensionsv1.JSON{Raw: []byte(`"short"`)}, }, }, @@ -527,7 +527,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: 
clusterv1.JSONSchemaProps{ Type: "string", - MaxLength: pointer.Int64(6), + MaxLength: ptr.To[int64](6), Default: &apiextensionsv1.JSON{Raw: []byte(`"veryLongValueIsInvalid"`)}, }, }, @@ -548,7 +548,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { "replicas": { Type: "integer", Default: &apiextensionsv1.JSON{Raw: []byte(`100`)}, - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -571,7 +571,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { "replicas": { Type: "integer", Default: &apiextensionsv1.JSON{Raw: []byte(`-100`)}, - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -597,7 +597,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -623,7 +623,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -651,12 +651,12 @@ func Test_ValidateClusterClassVariable(t *testing.T) { "replicas": { Type: "integer", Default: &apiextensionsv1.JSON{Raw: []byte(`100`)}, - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, "value": { Type: "integer", Default: &apiextensionsv1.JSON{Raw: []byte(`100`)}, - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -685,7 +685,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { "replicas": { Type: "integer", Default: &apiextensionsv1.JSON{Raw: []byte(`100`)}, - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -702,7 +702,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MaxLength: pointer.Int64(6), + MaxLength: ptr.To[int64](6), Example: &apiextensionsv1.JSON{Raw: []byte(`"short"`)}, }, }, @@ -715,7 +715,7 @@ func 
Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MaxLength: pointer.Int64(6), + MaxLength: ptr.To[int64](6), Example: &apiextensionsv1.JSON{Raw: []byte(`"veryLongValueIsInvalid"`)}, }, }, @@ -735,7 +735,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(0), + Minimum: ptr.To[int64](0), Example: &apiextensionsv1.JSON{Raw: []byte(`100`)}, }, }, @@ -758,7 +758,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(0), + Minimum: ptr.To[int64](0), Example: &apiextensionsv1.JSON{Raw: []byte(`-100`)}, }, }, @@ -785,7 +785,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -811,7 +811,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -827,7 +827,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MaxLength: pointer.Int64(6), + MaxLength: ptr.To[int64](6), Enum: []apiextensionsv1.JSON{ {Raw: []byte(`"short1"`)}, {Raw: []byte(`"short2"`)}, @@ -843,7 +843,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Schema: clusterv1.VariableSchema{ OpenAPIV3Schema: clusterv1.JSONSchemaProps{ Type: "string", - MaxLength: pointer.Int64(6), + MaxLength: ptr.To[int64](6), Enum: []apiextensionsv1.JSON{ {Raw: []byte(`"veryLongValueIsInvalid"`)}, {Raw: []byte(`"short"`)}, @@ -866,7 +866,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { 
Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(0), + Minimum: ptr.To[int64](0), Enum: []apiextensionsv1.JSON{ {Raw: []byte(`100`)}, {Raw: []byte(`5`)}, @@ -892,7 +892,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(0), + Minimum: ptr.To[int64](0), Enum: []apiextensionsv1.JSON{ {Raw: []byte(`100`)}, {Raw: []byte(`-100`)}, @@ -927,7 +927,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, @@ -958,7 +958,7 @@ func Test_ValidateClusterClassVariable(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "replicas": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, }, }, diff --git a/internal/topology/variables/schema.go b/internal/topology/variables/schema.go index 13225c24d1f7..ffff13091835 100644 --- a/internal/topology/variables/schema.go +++ b/internal/topology/variables/schema.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -53,7 +53,7 @@ func convertToAPIExtensionsJSONSchemaProps(schema *clusterv1.JSONSchemaProps, fl // apiextensions.JSONSchemaProps only allows setting XPreserveUnknownFields // to true or undefined, false is forbidden. 
if schema.XPreserveUnknownFields { - props.XPreserveUnknownFields = pointer.Bool(true) + props.XPreserveUnknownFields = ptr.To(true) } if schema.Default != nil && schema.Default.Raw != nil { diff --git a/internal/topology/variables/schema_test.go b/internal/topology/variables/schema_test.go index d3303081651a..3ec28cb68cdf 100644 --- a/internal/topology/variables/schema_test.go +++ b/internal/topology/variables/schema_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -42,23 +42,23 @@ func Test_convertToAPIExtensionsJSONSchemaProps(t *testing.T) { schema: &clusterv1.JSONSchemaProps{ Type: "integer", Format: "uri", - MaxLength: pointer.Int64(4), - MinLength: pointer.Int64(2), + MaxLength: ptr.To[int64](4), + MinLength: ptr.To[int64](2), Pattern: "abc.*", - Maximum: pointer.Int64(43), + Maximum: ptr.To[int64](43), ExclusiveMaximum: true, - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), ExclusiveMinimum: false, }, want: &apiextensions.JSONSchemaProps{ Type: "integer", Format: "uri", - MaxLength: pointer.Int64(4), - MinLength: pointer.Int64(2), + MaxLength: ptr.To[int64](4), + MinLength: ptr.To[int64](2), Pattern: "abc.*", - Maximum: pointer.Float64(43), + Maximum: ptr.To[float64](43), ExclusiveMaximum: true, - Minimum: pointer.Float64(1), + Minimum: ptr.To[float64](1), ExclusiveMinimum: false, }, }, @@ -100,13 +100,13 @@ func Test_convertToAPIExtensionsJSONSchemaProps(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "property1": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, "property2": { Type: "string", Format: "uri", - MinLength: pointer.Int64(2), - MaxLength: pointer.Int64(4), + MinLength: ptr.To[int64](2), + MaxLength: ptr.To[int64](4), }, }, }, @@ 
-114,13 +114,13 @@ func Test_convertToAPIExtensionsJSONSchemaProps(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "property1": { Type: "integer", - Minimum: pointer.Float64(1), + Minimum: ptr.To[float64](1), }, "property2": { Type: "string", Format: "uri", - MinLength: pointer.Int64(2), - MaxLength: pointer.Int64(4), + MinLength: ptr.To[int64](2), + MaxLength: ptr.To[int64](4), }, }, }, @@ -132,13 +132,13 @@ func Test_convertToAPIExtensionsJSONSchemaProps(t *testing.T) { Properties: map[string]clusterv1.JSONSchemaProps{ "property1": { Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), }, "property2": { Type: "string", Format: "uri", - MinLength: pointer.Int64(2), - MaxLength: pointer.Int64(4), + MinLength: ptr.To[int64](2), + MaxLength: ptr.To[int64](4), }, }, }, @@ -150,13 +150,13 @@ func Test_convertToAPIExtensionsJSONSchemaProps(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "property1": { Type: "integer", - Minimum: pointer.Float64(1), + Minimum: ptr.To[float64](1), }, "property2": { Type: "string", Format: "uri", - MinLength: pointer.Int64(2), - MaxLength: pointer.Int64(4), + MinLength: ptr.To[int64](2), + MaxLength: ptr.To[int64](4), }, }, }, @@ -168,20 +168,20 @@ func Test_convertToAPIExtensionsJSONSchemaProps(t *testing.T) { schema: &clusterv1.JSONSchemaProps{ Items: &clusterv1.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Int64(1), + Minimum: ptr.To[int64](1), Format: "uri", - MinLength: pointer.Int64(2), - MaxLength: pointer.Int64(4), + MinLength: ptr.To[int64](2), + MaxLength: ptr.To[int64](4), }, }, want: &apiextensions.JSONSchemaProps{ Items: &apiextensions.JSONSchemaPropsOrArray{ Schema: &apiextensions.JSONSchemaProps{ Type: "integer", - Minimum: pointer.Float64(1), + Minimum: ptr.To[float64](1), Format: "uri", - MinLength: pointer.Int64(2), - MaxLength: pointer.Int64(4), + MinLength: ptr.To[int64](2), + MaxLength: ptr.To[int64](4), }, }, }, diff --git 
a/internal/util/ssa/patch_test.go b/internal/util/ssa/patch_test.go index e772422b4cec..0139bce49d35 100644 --- a/internal/util/ssa/patch_test.go +++ b/internal/util/ssa/patch_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -106,10 +106,10 @@ func TestPatch(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "cluster-1", - Version: pointer.String("v1.25.0"), + Version: ptr.To("v1.25.0"), NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second}, Bootstrap: clusterv1.Bootstrap{ - DataSecretName: pointer.String("data-secret"), + DataSecretName: ptr.To("data-secret"), }, }, } diff --git a/internal/webhooks/cluster_test.go b/internal/webhooks/cluster_test.go index e4a9a802d39a..663d09be99c8 100644 --- a/internal/webhooks/cluster_test.go +++ b/internal/webhooks/cluster_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/client/interceptor" @@ -1355,7 +1355,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { WithVersion("v1.22.2"). WithControlPlaneReplicas(3). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build()). Build(), @@ -1418,7 +1418,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { WithVersion("v1.22.2"). WithControlPlaneReplicas(3). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build()). 
Build(), @@ -1437,7 +1437,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { WithVersion("v1.22.2"). WithControlPlaneReplicas(3). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { @@ -1467,7 +1467,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { builder.MachineDeploymentTopology("md1"). WithClass("worker-class"). WithMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build(), ). @@ -1521,7 +1521,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { builder.MachineDeploymentTopology("md1"). WithClass("worker-class"). WithMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build(), ). @@ -1549,7 +1549,7 @@ func TestClusterTopologyValidationWithClient(t *testing.T) { builder.MachineDeploymentTopology("md1"). WithClass("worker-class"). 
WithMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { diff --git a/internal/webhooks/clusterclass_test.go b/internal/webhooks/clusterclass_test.go index 4eddd125bc94..c9f02d5310a4 100644 --- a/internal/webhooks/clusterclass_test.go +++ b/internal/webhooks/clusterclass_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -1173,7 +1173,7 @@ func TestClusterClassValidation(t *testing.T) { WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). - WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: pointer.String("{{ .cluster.name }}-cp-{{ .random }}")}). + WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-cp-{{ .random }}")}). WithControlPlaneInfrastructureMachineTemplate( builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1"). Build()). @@ -1183,7 +1183,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()). - WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: pointer.String("{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}")}). + WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}")}). Build()). 
WithWorkerMachinePoolClasses( *builder.MachinePoolClass("bb"). @@ -1191,7 +1191,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2").Build()). - WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: pointer.String("{{ .cluster.name }}-md-{{ .machinePool.topologyName }}-{{ .random }}")}). + WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: ptr.To("{{ .cluster.name }}-md-{{ .machinePool.topologyName }}-{{ .random }}")}). Build()). Build(), expectErr: false, @@ -1204,7 +1204,7 @@ func TestClusterClassValidation(t *testing.T) { WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). - WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: pointer.String("template-cp-{{ .invalidkey }}")}). + WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: ptr.To("template-cp-{{ .invalidkey }}")}). WithControlPlaneInfrastructureMachineTemplate( builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1"). Build()). @@ -1219,7 +1219,7 @@ func TestClusterClassValidation(t *testing.T) { WithControlPlaneTemplate( builder.ControlPlaneTemplate(metav1.NamespaceDefault, "cp1"). Build()). - WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: pointer.String("template-cp-{{ .cluster.name }}-")}). + WithControlPlaneNamingStrategy(&clusterv1.ControlPlaneClassNamingStrategy{Template: ptr.To("template-cp-{{ .cluster.name }}-")}). WithControlPlaneInfrastructureMachineTemplate( builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cpInfra1"). Build()). @@ -1243,7 +1243,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()). 
WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()). - WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: pointer.String("template-md-{{ .cluster.name")}). + WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: ptr.To("template-md-{{ .cluster.name")}). Build()). Build(), expectErr: true, @@ -1265,7 +1265,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap1").Build()). - WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: pointer.String("template-md-{{ .cluster.name }}-")}). + WithNamingStrategy(&clusterv1.MachineDeploymentClassNamingStrategy{Template: ptr.To("template-md-{{ .cluster.name }}-")}). Build()). Build(), expectErr: true, @@ -1287,7 +1287,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2").Build()). - WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: pointer.String("template-mp-{{ .cluster.name")}). + WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: ptr.To("template-mp-{{ .cluster.name")}). Build()). Build(), expectErr: true, @@ -1309,7 +1309,7 @@ func TestClusterClassValidation(t *testing.T) { builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2").Build()). WithBootstrapTemplate( builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2").Build()). - WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: pointer.String("template-mp-{{ .cluster.name }}-")}). + WithNamingStrategy(&clusterv1.MachinePoolClassNamingStrategy{Template: ptr.To("template-mp-{{ .cluster.name }}-")}). Build()). 
Build(), expectErr: true, @@ -1547,7 +1547,7 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) { WithTopology(builder.ClusterTopology(). WithClass("clusterclass1"). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build()). Build(), @@ -1610,7 +1610,7 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) { WithTopology(builder.ClusterTopology(). WithClass("clusterclass1"). WithControlPlaneMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { @@ -1650,7 +1650,7 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) { WithMachineDeployment(builder.MachineDeploymentTopology("md1"). WithClass("mdclass1"). WithMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), }). Build()). Build()). @@ -1743,7 +1743,7 @@ func TestClusterClassValidationWithClusterAwareChecks(t *testing.T) { WithMachineDeployment(builder.MachineDeploymentTopology("md1"). WithClass("mdclass1"). WithMachineHealthCheck(&clusterv1.MachineHealthCheckTopology{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), MachineHealthCheckClass: clusterv1.MachineHealthCheckClass{ UnhealthyConditions: []clusterv1.UnhealthyCondition{ { diff --git a/internal/webhooks/machine_test.go b/internal/webhooks/machine_test.go index 3568250cbe20..0b1732900c94 100644 --- a/internal/webhooks/machine_test.go +++ b/internal/webhooks/machine_test.go @@ -22,7 +22,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/webhooks/util" @@ -37,7 +37,7 @@ func TestMachineDefault(t *testing.T) { }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ConfigRef: &corev1.ObjectReference{}}, - Version: pointer.String("1.17.5"), + Version: ptr.To("1.17.5"), }, } @@ -66,7 +66,7 @@ func TestMachineBootstrapValidation(t *testing.T) { }, { name: "should not return error if dataSecretName is set", - bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: pointer.String("test")}, + bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: ptr.To("test")}, expectErr: false, }, { @@ -261,7 +261,7 @@ func TestMachineVersionValidation(t *testing.T) { m := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ Version: &tt.version, - Bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: pointer.String("test")}, + Bootstrap: clusterv1.Bootstrap{ConfigRef: nil, DataSecretName: ptr.To("test")}, }, } webhook := &Machine{} diff --git a/internal/webhooks/machinedeployment.go b/internal/webhooks/machinedeployment.go index ed7a9ca417f3..80b1203f5bae 100644 --- a/internal/webhooks/machinedeployment.go +++ b/internal/webhooks/machinedeployment.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -97,18 +97,18 @@ func (webhook *MachineDeployment) Default(ctx context.Context, obj runtime.Objec if err != nil { return err } - m.Spec.Replicas = pointer.Int32(replicas) + m.Spec.Replicas = ptr.To[int32](replicas) if m.Spec.MinReadySeconds == nil { - m.Spec.MinReadySeconds = 
pointer.Int32(0) + m.Spec.MinReadySeconds = ptr.To[int32](0) } if m.Spec.RevisionHistoryLimit == nil { - m.Spec.RevisionHistoryLimit = pointer.Int32(1) + m.Spec.RevisionHistoryLimit = ptr.To[int32](1) } if m.Spec.ProgressDeadlineSeconds == nil { - m.Spec.ProgressDeadlineSeconds = pointer.Int32(600) + m.Spec.ProgressDeadlineSeconds = ptr.To[int32](600) } if m.Spec.Selector.MatchLabels == nil { diff --git a/internal/webhooks/machinedeployment_test.go b/internal/webhooks/machinedeployment_test.go index 34125b5b7014..e563717d8676 100644 --- a/internal/webhooks/machinedeployment_test.go +++ b/internal/webhooks/machinedeployment_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -43,7 +43,7 @@ func TestMachineDeploymentDefault(t *testing.T) { ClusterName: "test-cluster", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.19.10"), + Version: ptr.To("1.19.10"), }, }, }, @@ -66,10 +66,10 @@ func TestMachineDeploymentDefault(t *testing.T) { g.Expect(md.Labels[clusterv1.ClusterNameLabel]).To(Equal(md.Spec.ClusterName)) - g.Expect(md.Spec.MinReadySeconds).To(Equal(pointer.Int32(0))) - g.Expect(md.Spec.Replicas).To(Equal(pointer.Int32(1))) - g.Expect(md.Spec.RevisionHistoryLimit).To(Equal(pointer.Int32(1))) - g.Expect(md.Spec.ProgressDeadlineSeconds).To(Equal(pointer.Int32(600))) + g.Expect(md.Spec.MinReadySeconds).To(Equal(ptr.To[int32](0))) + g.Expect(md.Spec.Replicas).To(Equal(ptr.To[int32](1))) + g.Expect(md.Spec.RevisionHistoryLimit).To(Equal(ptr.To[int32](1))) + g.Expect(md.Spec.ProgressDeadlineSeconds).To(Equal(ptr.To[int32](600))) g.Expect(md.Spec.Strategy).ToNot(BeNil()) g.Expect(md.Spec.Selector.MatchLabels).To(HaveKeyWithValue(clusterv1.MachineDeploymentNameLabel, 
"test-md")) @@ -97,7 +97,7 @@ func TestCalculateMachineDeploymentReplicas(t *testing.T) { name: "if new MD has replicas set, keep that value", newMD: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(5), + Replicas: ptr.To[int32](5), }, }, expectedReplicas: 5, @@ -190,7 +190,7 @@ func TestCalculateMachineDeploymentReplicas(t *testing.T) { }, oldMD: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, }, expectedReplicas: 3, @@ -207,7 +207,7 @@ func TestCalculateMachineDeploymentReplicas(t *testing.T) { }, oldMD: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(15), + Replicas: ptr.To[int32](15), }, }, expectedReplicas: 7, @@ -224,7 +224,7 @@ func TestCalculateMachineDeploymentReplicas(t *testing.T) { }, oldMD: &clusterv1.MachineDeployment{ Spec: clusterv1.MachineDeploymentSpec{ - Replicas: pointer.Int32(4), + Replicas: ptr.To[int32](4), }, }, expectedReplicas: 4, @@ -467,7 +467,7 @@ func TestMachineDeploymentVersionValidation(t *testing.T) { Spec: clusterv1.MachineDeploymentSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String(tt.version), + Version: ptr.To(tt.version), }, }, }, diff --git a/internal/webhooks/machineset.go b/internal/webhooks/machineset.go index f0f38f84e42e..0b910cdda985 100644 --- a/internal/webhooks/machineset.go +++ b/internal/webhooks/machineset.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -99,7 +99,7 @@ func (webhook *MachineSet) Default(ctx context.Context, obj runtime.Object) erro if err != nil { return err } - m.Spec.Replicas = pointer.Int32(replicas) 
+ m.Spec.Replicas = ptr.To[int32](replicas) if m.Spec.DeletePolicy == "" { randomPolicy := string(clusterv1.RandomMachineSetDeletePolicy) diff --git a/internal/webhooks/machineset_test.go b/internal/webhooks/machineset_test.go index d9779b799a71..a972b60dda11 100644 --- a/internal/webhooks/machineset_test.go +++ b/internal/webhooks/machineset_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -40,7 +40,7 @@ func TestMachineSetDefault(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.19.10"), + Version: ptr.To("1.19.10"), }, }, }, @@ -75,7 +75,7 @@ func TestCalculateMachineSetReplicas(t *testing.T) { name: "if new MS has replicas set, keep that value", newMS: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(5), + Replicas: ptr.To[int32](5), }, }, expectedReplicas: 5, @@ -168,7 +168,7 @@ func TestCalculateMachineSetReplicas(t *testing.T) { }, oldMS: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(1), + Replicas: ptr.To[int32](1), }, }, expectedReplicas: 3, @@ -185,7 +185,7 @@ func TestCalculateMachineSetReplicas(t *testing.T) { }, oldMS: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(15), + Replicas: ptr.To[int32](15), }, }, expectedReplicas: 7, @@ -202,7 +202,7 @@ func TestCalculateMachineSetReplicas(t *testing.T) { }, oldMS: &clusterv1.MachineSet{ Spec: clusterv1.MachineSetSpec{ - Replicas: pointer.Int32(4), + Replicas: ptr.To[int32](4), }, }, expectedReplicas: 4, @@ -390,7 +390,7 @@ func TestMachineSetVersionValidation(t *testing.T) { Spec: clusterv1.MachineSetSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: 
clusterv1.MachineSpec{ - Version: pointer.String(tt.version), + Version: ptr.To(tt.version), }, }, }, diff --git a/internal/webhooks/patch_validation_test.go b/internal/webhooks/patch_validation_test.go index c090be6db428..a7a7b7cfc8e3 100644 --- a/internal/webhooks/patch_validation_test.go +++ b/internal/webhooks/patch_validation_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/feature" @@ -69,7 +69,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/variableSetting/variableValue1", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName1"), + Variable: ptr.To("variableName1"), }, }, }, @@ -92,7 +92,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/variableSetting/variableValue2", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName2"), + Variable: ptr.To("variableName2"), }, }, }, @@ -157,7 +157,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -211,7 +211,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/cluster-name", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName1"), + Variable: ptr.To("variableName1"), }, }, }, @@ -234,7 +234,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/variableSetting/variableValue", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName2"), + 
Variable: ptr.To("variableName2"), }, }, }, @@ -283,7 +283,7 @@ func TestValidatePatches(t *testing.T) { Patches: []clusterv1.ClusterClassPatch{ { Name: "patch1", - EnabledIf: pointer.String(`template {{ .variableB }}`), + EnabledIf: ptr.To(`template {{ .variableB }}`), Definitions: []clusterv1.PatchDefinition{}, }, }, @@ -306,7 +306,7 @@ func TestValidatePatches(t *testing.T) { Patches: []clusterv1.ClusterClassPatch{ { Name: "patch1", - EnabledIf: pointer.String(`template {{{{{{{{ .variableB }}`), + EnabledIf: ptr.To(`template {{{{{{{{ .variableB }}`), }, }, }, @@ -428,7 +428,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/0/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -481,7 +481,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/1/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -535,7 +535,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/01/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -589,7 +589,7 @@ func TestValidatePatches(t *testing.T) { Op: "remove", Path: "/spec/template/0/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -643,7 +643,7 @@ func TestValidatePatches(t *testing.T) { Op: "replace", Path: "/spec/template/0/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -739,7 +739,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, Value: &apiextensionsv1.JSON{Raw: []byte("1")}, }, @@ -1004,8 +1004,8 
@@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), - Template: pointer.String(`template {{ .variableB }}`), + Variable: ptr.To("variableName"), + Template: ptr.To(`template {{ .variableB }}`), }, }, }, @@ -1059,7 +1059,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Template: pointer.String(`template {{ .variableB }}`), + Template: ptr.To(`template {{ .variableB }}`), }, }, }, @@ -1112,7 +1112,7 @@ func TestValidatePatches(t *testing.T) { Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ // Template is invalid - too many leading curly braces. - Template: pointer.String(`template {{{{{{{{ .variableB }}`), + Template: ptr.To(`template {{{{{{{{ .variableB }}`), }, }, }, @@ -1166,7 +1166,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("undefinedVariable"), + Variable: ptr.To("undefinedVariable"), }, }, }, @@ -1218,7 +1218,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName"), + Variable: ptr.To("variableName"), }, }, }, @@ -1270,7 +1270,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("variableName.nestedField"), + Variable: ptr.To("variableName.nestedField"), }, }, }, @@ -1327,7 +1327,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: &clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.notDefined"), + Variable: ptr.To("builtin.notDefined"), }, }, }, @@ -1370,7 +1370,7 @@ func TestValidatePatches(t *testing.T) { Op: "add", Path: "/spec/template/spec/", ValueFrom: 
&clusterv1.JSONPatchValue{ - Variable: pointer.String("builtin.machineDeployment.version"), + Variable: ptr.To("builtin.machineDeployment.version"), }, }, }, @@ -1401,8 +1401,8 @@ func TestValidatePatches(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("generate-extension"), - ValidateExtension: pointer.String("generate-extension"), + GenerateExtension: ptr.To("generate-extension"), + ValidateExtension: ptr.To("generate-extension"), }, }, }, @@ -1428,8 +1428,8 @@ func TestValidatePatches(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("generate-extension"), - ValidateExtension: pointer.String("generate-extension"), + GenerateExtension: ptr.To("generate-extension"), + ValidateExtension: ptr.To("generate-extension"), }, }, }, @@ -1479,8 +1479,8 @@ func TestValidatePatches(t *testing.T) { { Name: "patch1", External: &clusterv1.ExternalPatchDefinition{ - GenerateExtension: pointer.String("generate-extension"), - ValidateExtension: pointer.String("generate-extension"), + GenerateExtension: ptr.To("generate-extension"), + ValidateExtension: ptr.To("generate-extension"), }, Definitions: []clusterv1.PatchDefinition{}, }, diff --git a/internal/webhooks/runtime/extensionconfig_webhook.go b/internal/webhooks/runtime/extensionconfig_webhook.go index 6ebcca3aaf41..1a006ae0e211 100644 --- a/internal/webhooks/runtime/extensionconfig_webhook.go +++ b/internal/webhooks/runtime/extensionconfig_webhook.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -65,7 +65,7 @@ func (webhook *ExtensionConfig) Default(_ context.Context, obj runtime.Object) e } if 
extensionConfig.Spec.ClientConfig.Service != nil { if extensionConfig.Spec.ClientConfig.Service.Port == nil { - extensionConfig.Spec.ClientConfig.Service.Port = pointer.Int32(443) + extensionConfig.Spec.ClientConfig.Service.Port = ptr.To[int32](443) } } return nil diff --git a/internal/webhooks/runtime/extensionconfig_webhook_test.go b/internal/webhooks/runtime/extensionconfig_webhook_test.go index 2905fefb5574..9ae02c19fb40 100644 --- a/internal/webhooks/runtime/extensionconfig_webhook_test.go +++ b/internal/webhooks/runtime/extensionconfig_webhook_test.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" @@ -48,13 +48,13 @@ func TestExtensionConfigValidationFeatureGated(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: pointer.String("https://extension-address.com"), + URL: ptr.To("https://extension-address.com"), }, NamespaceSelector: &metav1.LabelSelector{}, }, } updatedExtension := extension.DeepCopy() - updatedExtension.Spec.ClientConfig.URL = pointer.String("https://a-new-extension-address.com") + updatedExtension.Spec.ClientConfig.URL = ptr.To("https://a-new-extension-address.com") tests := []struct { name string new *runtimev1.ExtensionConfig @@ -130,7 +130,7 @@ func TestExtensionConfigDefault(t *testing.T) { g.Expect(extensionConfigWebhook.Default(ctx, extensionConfig)).To(Succeed()) g.Expect(extensionConfig.Spec.NamespaceSelector).To(BeComparableTo(&metav1.LabelSelector{})) - g.Expect(extensionConfig.Spec.ClientConfig.Service.Port).To(BeComparableTo(pointer.Int32(443))) + g.Expect(extensionConfig.Spec.ClientConfig.Service.Port).To(BeComparableTo(ptr.To[int32](443))) } func TestExtensionConfigValidate(t *testing.T) { @@ -140,7 +140,7 @@ func 
TestExtensionConfigValidate(t *testing.T) { }, Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ - URL: pointer.String("https://extension-address.com"), + URL: ptr.To("https://extension-address.com"), }, }, } @@ -152,8 +152,8 @@ func TestExtensionConfigValidate(t *testing.T) { Spec: runtimev1.ExtensionConfigSpec{ ClientConfig: runtimev1.ClientConfig{ Service: &runtimev1.ServiceReference{ - Path: pointer.String("/path/to/handler"), - Port: pointer.Int32(1), + Path: ptr.To("/path/to/handler"), + Port: ptr.To[int32](1), Name: "foo", Namespace: "bar", }}, @@ -168,13 +168,13 @@ func TestExtensionConfigValidate(t *testing.T) { // Valid updated Extension updatedExtension := extensionWithURL.DeepCopy() - updatedExtension.Spec.ClientConfig.URL = pointer.String("https://a-in-extension-address.com") + updatedExtension.Spec.ClientConfig.URL = ptr.To("https://a-in-extension-address.com") extensionWithoutURLOrService := extensionWithURL.DeepCopy() extensionWithoutURLOrService.Spec.ClientConfig.URL = nil extensionWithInvalidServicePath := extensionWithService.DeepCopy() - extensionWithInvalidServicePath.Spec.ClientConfig.Service.Path = pointer.String("https://example.com") + extensionWithInvalidServicePath.Spec.ClientConfig.Service.Path = ptr.To("https://example.com") extensionWithNoServiceName := extensionWithService.DeepCopy() extensionWithNoServiceName.Spec.ClientConfig.Service.Name = "" @@ -189,13 +189,13 @@ func TestExtensionConfigValidate(t *testing.T) { extensionWithBadServiceNamespace.Spec.ClientConfig.Service.Namespace = "INVALID" badURLExtension := extensionWithURL.DeepCopy() - badURLExtension.Spec.ClientConfig.URL = pointer.String("https//extension-address.com") + badURLExtension.Spec.ClientConfig.URL = ptr.To("https//extension-address.com") badSchemeExtension := extensionWithURL.DeepCopy() - badSchemeExtension.Spec.ClientConfig.URL = pointer.String("unknown://extension-address.com") + badSchemeExtension.Spec.ClientConfig.URL = 
ptr.To("unknown://extension-address.com") extensionWithInvalidServicePort := extensionWithService.DeepCopy() - extensionWithInvalidServicePort.Spec.ClientConfig.Service.Port = pointer.Int32(90000) + extensionWithInvalidServicePort.Spec.ClientConfig.Service.Port = ptr.To[int32](90000) extensionWithInvalidNamespaceSelector := extensionWithService.DeepCopy() extensionWithInvalidNamespaceSelector.Spec.NamespaceSelector = &metav1.LabelSelector{ diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go index 278841bb6b53..cf7a59b34286 100644 --- a/test/e2e/autoscaler.go +++ b/test/e2e/autoscaler.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" @@ -116,8 +116,8 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(0), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](0), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), diff --git a/test/e2e/autoscaler_test.go b/test/e2e/autoscaler_test.go index ca8809c16b42..539815bd1e93 100644 --- a/test/e2e/autoscaler_test.go +++ b/test/e2e/autoscaler_test.go @@ -21,7 +21,7 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When using the autoscaler with Cluster API using ClusterClass [ClusterClass]", func() { @@ -32,9 +32,9 @@ var _ = Describe("When using the autoscaler with Cluster API using ClusterClass BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), InfrastructureMachineTemplateKind: "dockermachinetemplates", - Flavor: pointer.String("topology-autoscaler"), + Flavor: ptr.To("topology-autoscaler"), AutoscalerVersion: "v1.28.0", } }) diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index 8e1c2827dca7..dbb3207833d6 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -137,12 +137,12 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: infrastructureProvider, - Flavor: pointer.StringDeref(input.Flavor, "upgrades"), + Flavor: ptr.Deref(input.Flavor, "upgrades"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), - ControlPlaneMachineCount: pointer.Int64(controlPlaneMachineCount), - WorkerMachineCount: pointer.Int64(workerMachineCount), + ControlPlaneMachineCount: ptr.To[int64](controlPlaneMachineCount), + WorkerMachineCount: ptr.To[int64](workerMachineCount), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: 
input.E2EConfig.GetIntervals(specName, "wait-cluster"), @@ -178,11 +178,11 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust ) if input.E2EConfig.HasVariable(CPMachineTemplateUpgradeTo) { - upgradeCPMachineTemplateTo = pointer.String(input.E2EConfig.GetVariable(CPMachineTemplateUpgradeTo)) + upgradeCPMachineTemplateTo = ptr.To(input.E2EConfig.GetVariable(CPMachineTemplateUpgradeTo)) } if input.E2EConfig.HasVariable(WorkersMachineTemplateUpgradeTo) { - upgradeWorkersMachineTemplateTo = pointer.String(input.E2EConfig.GetVariable(WorkersMachineTemplateUpgradeTo)) + upgradeWorkersMachineTemplateTo = ptr.To(input.E2EConfig.GetVariable(WorkersMachineTemplateUpgradeTo)) } framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index bd82b0f5c2d0..ada1bb9fef71 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -32,7 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -169,12 +169,12 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: infrastructureProvider, - Flavor: pointer.StringDeref(input.Flavor, "upgrades"), + Flavor: ptr.Deref(input.Flavor, "upgrades"), Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), - ControlPlaneMachineCount: pointer.Int64(controlPlaneMachineCount), - WorkerMachineCount: pointer.Int64(workerMachineCount), + ControlPlaneMachineCount: ptr.To[int64](controlPlaneMachineCount), + 
WorkerMachineCount: ptr.To[int64](workerMachineCount), }, PreWaitForCluster: func() { beforeClusterCreateTestHandler(ctx, diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go index 388ca5ac41bc..9f54da50b9b6 100644 --- a/test/e2e/cluster_upgrade_runtimesdk_test.go +++ b/test/e2e/cluster_upgrade_runtimesdk_test.go @@ -23,7 +23,7 @@ import ( "github.com/blang/semver/v4" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [ClusterClass]", func() { @@ -40,9 +40,9 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), // "upgrades" is the same as the "topology" flavor but with an additional MachinePool. - Flavor: pointer.String("upgrades-runtimesdk"), + Flavor: ptr.To("upgrades-runtimesdk"), } }) }) diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index 2715d4d68004..1096bc12bf71 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -21,7 +21,7 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When upgrading a workload cluster using ClusterClass and testing K8S conformance [Conformance] [K8s-Upgrade] [ClusterClass]", func() { @@ -32,8 +32,8 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass and testi BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), - Flavor: pointer.String("upgrades"), + InfrastructureProvider: ptr.To("docker"), + Flavor: ptr.To("upgrades"), } }) }) @@ -46,12 +46,12 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass [ClusterC BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), - Flavor: pointer.String("topology"), + InfrastructureProvider: ptr.To("docker"), + Flavor: ptr.To("topology"), // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(2), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](2), SkipConformanceTests: true, } }) @@ -65,13 +65,13 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. 
SkipConformanceTests: true, - ControlPlaneMachineCount: pointer.Int64(3), - WorkerMachineCount: pointer.Int64(1), - Flavor: pointer.String("topology"), + ControlPlaneMachineCount: ptr.To[int64](3), + WorkerMachineCount: ptr.To[int64](1), + Flavor: ptr.To("topology"), } }) }) @@ -84,13 +84,13 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. SkipConformanceTests: true, - ControlPlaneMachineCount: pointer.Int64(3), - WorkerMachineCount: pointer.Int64(1), - Flavor: pointer.String("kcp-scale-in"), + ControlPlaneMachineCount: ptr.To[int64](3), + WorkerMachineCount: ptr.To[int64](1), + Flavor: ptr.To("kcp-scale-in"), } }) }) diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go index 99f0d6c8688b..564ff083753e 100644 --- a/test/e2e/clusterclass_changes.go +++ b/test/e2e/clusterclass_changes.go @@ -29,7 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -171,8 +171,8 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: 
input.E2EConfig.GetIntervals(specName, "wait-cluster"), diff --git a/test/e2e/clusterclass_changes_test.go b/test/e2e/clusterclass_changes_test.go index d4a33c2439cf..8deb2cddc841 100644 --- a/test/e2e/clusterclass_changes_test.go +++ b/test/e2e/clusterclass_changes_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), Flavor: "topology", // ModifyControlPlaneFields are the ControlPlane fields which will be set on the // ControlPlaneTemplate of the ClusterClass after the initial Cluster creation. diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go index 01e7cbe3bf31..6e1149949057 100644 --- a/test/e2e/clusterclass_rollout.go +++ b/test/e2e/clusterclass_rollout.go @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -129,8 +129,8 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), @@ -175,13 +175,13 @@ func 
ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec topology.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec - topology.MinReadySeconds = pointer.Int32(rand.Int31n(20)) //nolint:gosec + topology.MinReadySeconds = ptr.To[int32](rand.Int31n(20)) //nolint:gosec topology.Strategy = &clusterv1.MachineDeploymentStrategy{ Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 0}, MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 5 + rand.Int31n(20)}, //nolint:gosec - DeletePolicy: pointer.String(string(clusterv1.NewestMachineSetDeletePolicy)), + DeletePolicy: ptr.To(string(clusterv1.NewestMachineSetDeletePolicy)), }, } }, @@ -202,7 +202,7 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec topology.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec - topology.MinReadySeconds = pointer.Int32(rand.Int31n(20)) //nolint:gosec + topology.MinReadySeconds = ptr.To[int32](rand.Int31n(20)) //nolint:gosec }, WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"), }) diff --git a/test/e2e/clusterclass_rollout_test.go b/test/e2e/clusterclass_rollout_test.go index be124137d572..600bd90e9185 100644 --- a/test/e2e/clusterclass_rollout_test.go +++ b/test/e2e/clusterclass_rollout_test.go @@ -21,7 
+21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing ClusterClass rollouts [ClusterClass]", func() { @@ -33,7 +33,7 @@ var _ = Describe("When testing ClusterClass rollouts [ClusterClass]", func() { ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology", - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index 85f93f751eec..e801906dddf9 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -37,7 +37,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/discovery" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -240,8 +240,8 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg Namespace: managementClusterNamespace.Name, ClusterName: managementClusterName, KubernetesVersion: initKubernetesVersion, - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), }, PreWaitForCluster: func() { if input.PreWaitForCluster != nil { @@ -359,8 +359,8 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg if kubernetesVersion == "" { kubernetesVersion = input.E2EConfig.GetVariable(KubernetesVersion) } - controlPlaneMachineCount := pointer.Int64(1) - workerMachineCount := pointer.Int64(1) + controlPlaneMachineCount := ptr.To[int64](1) + workerMachineCount := ptr.To[int64](1) log.Logf("Creating the workload cluster with name %q using the %q template (Kubernetes %s, %d control-plane machines, %d worker machines)", workLoadClusterName, "(default)", kubernetesVersion, *controlPlaneMachineCount, *workerMachineCount) diff --git 
a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index df06f2048afd..726f0a724d4c 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.5/clusterctl-{OS}-{ARCH}", // We have to pin the providers because with `InitWithProvidersContract` the test would // use the latest version for the contract (which is v1.3.X for v1beta1). @@ -61,7 +61,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.5/clusterctl-{OS}-{ARCH}", // We have to pin the providers because with `InitWithProvidersContract` the test would // use the latest version for the contract (which is v1.5.X for v1beta1). 
@@ -87,7 +87,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>cur BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.5/clusterctl-{OS}-{ARCH}", // We have to pin the providers because with `InitWithProvidersContract` the test would // use the latest version for the contract (which is v1.5.X for v1beta1). @@ -113,7 +113,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1beta1", // NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases. @@ -133,7 +133,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.5=>cur BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1beta1", // NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases. 
diff --git a/test/e2e/k8s_conformance.go b/test/e2e/k8s_conformance.go index e376403b5cb4..689c972476e4 100644 --- a/test/e2e/k8s_conformance.go +++ b/test/e2e/k8s_conformance.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -110,8 +110,8 @@ func K8SConformanceSpec(ctx context.Context, inputGetter func() K8SConformanceSp Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(workerMachineCount), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](workerMachineCount), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), diff --git a/test/e2e/k8s_conformance_test.go b/test/e2e/k8s_conformance_test.go index 8725b50ecc8d..c30929b32b64 100644 --- a/test/e2e/k8s_conformance_test.go +++ b/test/e2e/k8s_conformance_test.go @@ -21,7 +21,7 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing K8S conformance [Conformance]", func() { @@ -32,6 +32,6 @@ var _ = Describe("When testing K8S conformance [Conformance]", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker")} + InfrastructureProvider: ptr.To("docker")} }) }) diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go index be673e30fc44..e6d7b6eeb11c 100644 --- a/test/e2e/kcp_adoption.go +++ b/test/e2e/kcp_adoption.go @@ -29,7 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -82,7 +82,7 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu namespace *corev1.Namespace cancelWatches context.CancelFunc cluster *clusterv1.Cluster - replicas = pointer.Int64(1) + replicas = ptr.To[int64](1) ) SetDefaultEventuallyTimeout(15 * time.Minute) @@ -120,14 +120,14 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu // pass the clusterctl config file that points to the local provider repository created for this test, ClusterctlConfigPath: input.ClusterctlConfigPath, // select template - Flavor: pointer.StringDeref(input.Flavor, "kcp-adoption"), + Flavor: ptr.Deref(input.Flavor, "kcp-adoption"), // define template variables Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), InfrastructureProvider: infrastructureProvider, ControlPlaneMachineCount: replicas, - WorkerMachineCount: pointer.Int64(0), + WorkerMachineCount: ptr.To[int64](0), // setup clusterctl logs folder LogFolder: filepath.Join(input.ArtifactFolder, "clusters", 
input.BootstrapClusterProxy.GetName()), }) diff --git a/test/e2e/kcp_adoption_test.go b/test/e2e/kcp_adoption_test.go index 2db1e8e5093d..999764945f50 100644 --- a/test/e2e/kcp_adoption_test.go +++ b/test/e2e/kcp_adoption_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing KCP adoption", func() { @@ -32,6 +32,6 @@ var _ = Describe("When testing KCP adoption", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker")} + InfrastructureProvider: ptr.To("docker")} }) }) diff --git a/test/e2e/kcp_remediations.go b/test/e2e/kcp_remediations.go index fae495f93f15..4dd28ff700db 100644 --- a/test/e2e/kcp_remediations.go +++ b/test/e2e/kcp_remediations.go @@ -31,7 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/clientcmd" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -121,7 +121,7 @@ func KCPRemediationSpec(ctx context.Context, inputGetter func() KCPRemediationSp Proxy: input.BootstrapClusterProxy, ArtifactFolder: input.ArtifactFolder, SpecName: specName, - Flavor: pointer.StringDeref(input.Flavor, "kcp-remediation"), + Flavor: ptr.Deref(input.Flavor, "kcp-remediation"), InfrastructureProvider: input.InfrastructureProvider, // values to be injected in the template @@ -470,8 +470,8 @@ func createWorkloadClusterAndWait(ctx context.Context, input createWorkloadClust Namespace: input.Namespace, ClusterName: clusterName, KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(3), - WorkerMachineCount: pointer.Int64(0), + ControlPlaneMachineCount: ptr.To[int64](3), + WorkerMachineCount: ptr.To[int64](0), InfrastructureProvider: infrastructureProvider, // 
setup clusterctl logs folder LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.Proxy.GetName()), @@ -690,7 +690,7 @@ func getAuthenticationToken(ctx context.Context, managementClusterProxy framewor tokenRequest := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: pointer.Int64(2 * 60 * 60), // 2 hours. + ExpirationSeconds: ptr.To[int64](2 * 60 * 60), // 2 hours. }, } tokenRequest, err := managementClusterProxy.GetClientSet().CoreV1().ServiceAccounts(namespace).CreateToken(ctx, "mhc-test", tokenRequest, metav1.CreateOptions{}) diff --git a/test/e2e/kcp_remediations_test.go b/test/e2e/kcp_remediations_test.go index 9b9a4869b3b7..06d61f1f8190 100644 --- a/test/e2e/kcp_remediations_test.go +++ b/test/e2e/kcp_remediations_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing KCP remediation", func() { @@ -32,6 +32,6 @@ var _ = Describe("When testing KCP remediation", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker")} + InfrastructureProvider: ptr.To("docker")} }) }) diff --git a/test/e2e/machine_pool.go b/test/e2e/machine_pool.go index e72ec388c6c8..f8f8e4bfee3c 100644 --- a/test/e2e/machine_pool.go +++ b/test/e2e/machine_pool.go @@ -28,7 +28,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -95,12 +95,12 @@ func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) { ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: infrastructureProvider, - Flavor: pointer.StringDeref(input.Flavor, "machine-pool"), + Flavor: ptr.Deref(input.Flavor, "machine-pool"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(int64(workerMachineCount)), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](int64(workerMachineCount)), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), diff --git a/test/e2e/machine_pool_test.go b/test/e2e/machine_pool_test.go index 99a1f1d62e47..0951cd940705 100644 --- a/test/e2e/machine_pool_test.go +++ b/test/e2e/machine_pool_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing MachinePools", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing MachinePools", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) diff --git a/test/e2e/md_remediations.go b/test/e2e/md_remediations.go index 3d9be2eeafe4..65d27e6f2f8a 100644 --- a/test/e2e/md_remediations.go +++ b/test/e2e/md_remediations.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -94,12 +94,12 @@ func MachineDeploymentRemediationSpec(ctx context.Context, inputGetter func() Ma ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: infrastructureProvider, - Flavor: pointer.StringDeref(input.Flavor, "md-remediation"), + Flavor: ptr.Deref(input.Flavor, "md-remediation"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), diff --git a/test/e2e/md_remediations_test.go b/test/e2e/md_remediations_test.go index 458b77f38cfa..d2c7cc11aa8d 100644 --- a/test/e2e/md_remediations_test.go +++ b/test/e2e/md_remediations_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing MachineDeployment remediation", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing MachineDeployment remediation", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) diff --git a/test/e2e/md_rollout.go b/test/e2e/md_rollout.go index 46b8d6f38aa9..eefc15eed203 100644 --- a/test/e2e/md_rollout.go +++ b/test/e2e/md_rollout.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -92,8 +92,8 @@ func MachineDeploymentRolloutSpec(ctx context.Context, inputGetter func() Machin Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), diff --git a/test/e2e/md_rollout_test.go b/test/e2e/md_rollout_test.go index 6d30413fd665..eb138b1107e8 100644 --- a/test/e2e/md_rollout_test.go +++ b/test/e2e/md_rollout_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing MachineDeployment rolling upgrades", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing MachineDeployment rolling upgrades", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) diff --git a/test/e2e/md_scale.go b/test/e2e/md_scale.go index 0c3d92505967..ee8feac02e86 100644 --- a/test/e2e/md_scale.go +++ b/test/e2e/md_scale.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -92,8 +92,8 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), @@ -101,7 +101,7 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), }, clusterResources) - Expect(clusterResources.MachineDeployments[0].Spec.Replicas).To(Equal(pointer.Int32(1))) + Expect(clusterResources.MachineDeployments[0].Spec.Replicas).To(Equal(ptr.To[int32](1))) By("Scaling the MachineDeployment out to 3") framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{ diff --git a/test/e2e/md_scale_test.go b/test/e2e/md_scale_test.go index e60fa686e59c..363e77c4c48e 100644 --- a/test/e2e/md_scale_test.go +++ b/test/e2e/md_scale_test.go @@ -21,7 +21,7 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing MachineDeployment scale out/in", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing MachineDeployment scale out/in", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) diff --git a/test/e2e/node_drain_timeout.go b/test/e2e/node_drain_timeout.go index 27d5d04f0282..56a0ac06f6fa 100644 --- a/test/e2e/node_drain_timeout.go +++ b/test/e2e/node_drain_timeout.go @@ -27,7 +27,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -101,12 +101,12 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), InfrastructureProvider: infrastructureProvider, - Flavor: pointer.StringDeref(input.Flavor, "node-drain"), + Flavor: ptr.Deref(input.Flavor, "node-drain"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - ControlPlaneMachineCount: pointer.Int64(int64(controlPlaneReplicas)), - WorkerMachineCount: pointer.Int64(1), + ControlPlaneMachineCount: ptr.To[int64](int64(controlPlaneReplicas)), + WorkerMachineCount: ptr.To[int64](1), }, ControlPlaneWaiters: input.ControlPlaneWaiters, WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), @@ -116,7 +116,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo cluster := clusterResources.Cluster controlplane = 
clusterResources.ControlPlane machineDeployments = clusterResources.MachineDeployments - Expect(machineDeployments[0].Spec.Replicas).To(Equal(pointer.Int32(1))) + Expect(machineDeployments[0].Spec.Replicas).To(Equal(ptr.To[int32](1))) By("Add a deployment with unevictable pods and podDisruptionBudget to the workload cluster. The deployed pods cannot be evicted in the node draining process.") workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name) diff --git a/test/e2e/node_drain_timeout_test.go b/test/e2e/node_drain_timeout_test.go index 4bc661f75ea7..408b9d44e0c5 100644 --- a/test/e2e/node_drain_timeout_test.go +++ b/test/e2e/node_drain_timeout_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing node drain timeout", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing node drain timeout", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go index 983efdf77393..940b64413c1e 100644 --- a/test/e2e/quick_start.go +++ b/test/e2e/quick_start.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -108,12 +108,12 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) flavor = *input.Flavor } - controlPlaneMachineCount := pointer.Int64(1) + controlPlaneMachineCount := ptr.To[int64](1) if input.ControlPlaneMachineCount != nil { controlPlaneMachineCount = input.ControlPlaneMachineCount } - workerMachineCount := pointer.Int64(1) + workerMachineCount := ptr.To[int64](1) if input.WorkerMachineCount != nil { workerMachineCount = input.WorkerMachineCount } diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index 4335b7579c59..ea030ce558fe 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -22,7 +22,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/kubetest" @@ -36,7 +36,7 @@ var _ = Describe("When following the Cluster API quick-start", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. 
framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, @@ -69,8 +69,8 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: pointer.String("topology"), - InfrastructureProvider: pointer.String("docker"), + Flavor: ptr.To("topology"), + InfrastructureProvider: ptr.To("docker"), // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, @@ -104,8 +104,8 @@ var _ = Describe("When following the Cluster API quick-start with IPv6 [IPv6]", BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: pointer.String("ipv6"), - InfrastructureProvider: pointer.String("docker"), + Flavor: ptr.To("ipv6"), + InfrastructureProvider: ptr.To("docker"), } }) }) @@ -118,8 +118,8 @@ var _ = Describe("When following the Cluster API quick-start with Ignition", fun BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: pointer.String("ignition"), - InfrastructureProvider: pointer.String("docker"), + Flavor: ptr.To("ignition"), + InfrastructureProvider: ptr.To("docker"), } }) }) @@ -132,8 +132,8 @@ var _ = Describe("When following the Cluster API quick-start with dualstack and BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: pointer.String("topology-dualstack-ipv4-primary"), - InfrastructureProvider: pointer.String("docker"), + Flavor: ptr.To("topology-dualstack-ipv4-primary"), + InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { By("Running kubetest 
dualstack tests") // Start running the dualstack test suite from kubetest. @@ -161,8 +161,8 @@ var _ = Describe("When following the Cluster API quick-start with dualstack and BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: pointer.String("topology-dualstack-ipv6-primary"), - InfrastructureProvider: pointer.String("docker"), + Flavor: ptr.To("topology-dualstack-ipv6-primary"), + InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { By("Running kubetest dualstack tests") // Start running the dualstack test suite from kubetest. @@ -190,7 +190,7 @@ var _ = Describe("When following the Cluster API quick-start check finalizers re BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName) @@ -207,8 +207,8 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass c BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - Flavor: pointer.String("topology"), - InfrastructureProvider: pointer.String("docker"), + Flavor: ptr.To("topology"), + InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. 
framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName) diff --git a/test/e2e/scale.go b/test/e2e/scale.go index 37a6ba1810fb..edfee9ee372e 100644 --- a/test/e2e/scale.go +++ b/test/e2e/scale.go @@ -37,7 +37,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -177,7 +177,7 @@ func scaleSpec(ctx context.Context, inputGetter func() scaleSpecInput) { flavor = *input.Flavor } - controlPlaneMachineCount := pointer.Int64(1) + controlPlaneMachineCount := ptr.To[int64](1) if input.ControlPlaneMachineCount != nil { controlPlaneMachineCount = input.ControlPlaneMachineCount } @@ -186,10 +186,10 @@ func scaleSpec(ctx context.Context, inputGetter func() scaleSpecInput) { controlPlaneMachineCountStr := input.E2EConfig.GetVariable(scaleControlPlaneMachineCount) controlPlaneMachineCountInt, err := strconv.Atoi(controlPlaneMachineCountStr) Expect(err).ToNot(HaveOccurred()) - controlPlaneMachineCount = pointer.Int64(int64(controlPlaneMachineCountInt)) + controlPlaneMachineCount = ptr.To[int64](int64(controlPlaneMachineCountInt)) } - workerMachineCount := pointer.Int64(1) + workerMachineCount := ptr.To[int64](1) if input.WorkerMachineCount != nil { workerMachineCount = input.WorkerMachineCount } @@ -198,10 +198,10 @@ func scaleSpec(ctx context.Context, inputGetter func() scaleSpecInput) { workerMachineCountStr := input.E2EConfig.GetVariable(scaleWorkerMachineCount) workerMachineCountInt, err := strconv.Atoi(workerMachineCountStr) Expect(err).ToNot(HaveOccurred()) - workerMachineCount = pointer.Int64(int64(workerMachineCountInt)) + workerMachineCount = ptr.To[int64](int64(workerMachineCountInt)) } - machineDeploymentCount := pointer.Int64(1) + machineDeploymentCount := ptr.To[int64](1) if input.MachineDeploymentCount != nil { machineDeploymentCount = 
input.MachineDeploymentCount } @@ -210,7 +210,7 @@ func scaleSpec(ctx context.Context, inputGetter func() scaleSpecInput) { machineDeploymentCountStr := input.E2EConfig.GetVariable(scaleMachineDeploymentCount) machineDeploymentCountInt, err := strconv.Atoi(machineDeploymentCountStr) Expect(err).ToNot(HaveOccurred()) - machineDeploymentCount = pointer.Int64(int64(machineDeploymentCountInt)) + machineDeploymentCount = ptr.To[int64](int64(machineDeploymentCountInt)) } clusterCount := int64(10) diff --git a/test/e2e/scale_test.go b/test/e2e/scale_test.go index e72b58fa9e9e..f392cba19a92 100644 --- a/test/e2e/scale_test.go +++ b/test/e2e/scale_test.go @@ -21,7 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing the machinery for scale testing using in-memory provider", func() { @@ -29,15 +29,15 @@ var _ = Describe("When testing the machinery for scale testing using in-memory p return scaleSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, - InfrastructureProvider: pointer.String("in-memory"), + InfrastructureProvider: ptr.To("in-memory"), BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, - ClusterCount: pointer.Int64(10), - Concurrency: pointer.Int64(5), - Flavor: pointer.String(""), - ControlPlaneMachineCount: pointer.Int64(1), - MachineDeploymentCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(3), + ClusterCount: ptr.To[int64](10), + Concurrency: ptr.To[int64](5), + Flavor: ptr.To(""), + ControlPlaneMachineCount: ptr.To[int64](1), + MachineDeploymentCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](3), SkipCleanup: skipCleanup, } }) @@ -48,15 +48,15 @@ var _ = Describe("When scale testing using in-memory provider [Scale]", func() return scaleSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, - InfrastructureProvider: pointer.String("in-memory"), + InfrastructureProvider: 
ptr.To("in-memory"), BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, - ClusterCount: pointer.Int64(10), - Concurrency: pointer.Int64(5), - Flavor: pointer.String(""), - ControlPlaneMachineCount: pointer.Int64(1), - MachineDeploymentCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(3), + ClusterCount: ptr.To[int64](10), + Concurrency: ptr.To[int64](5), + Flavor: ptr.To(""), + ControlPlaneMachineCount: ptr.To[int64](1), + MachineDeploymentCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](3), SkipCleanup: skipCleanup, } }) diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go index 484dea5a0943..c96f48aa162f 100644 --- a/test/e2e/self_hosted.go +++ b/test/e2e/self_hosted.go @@ -27,7 +27,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -334,11 +334,11 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) ) if input.E2EConfig.HasVariable(CPMachineTemplateUpgradeTo) { - upgradeCPMachineTemplateTo = pointer.String(input.E2EConfig.GetVariable(CPMachineTemplateUpgradeTo)) + upgradeCPMachineTemplateTo = ptr.To(input.E2EConfig.GetVariable(CPMachineTemplateUpgradeTo)) } if input.E2EConfig.HasVariable(WorkersMachineTemplateUpgradeTo) { - upgradeWorkersMachineTemplateTo = pointer.String(input.E2EConfig.GetVariable(WorkersMachineTemplateUpgradeTo)) + upgradeWorkersMachineTemplateTo = ptr.To(input.E2EConfig.GetVariable(WorkersMachineTemplateUpgradeTo)) } framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ diff --git a/test/e2e/self_hosted_test.go b/test/e2e/self_hosted_test.go index 11ee79afa660..7c03e68b4d5a 100644 --- a/test/e2e/self_hosted_test.go +++ b/test/e2e/self_hosted_test.go @@ -21,7 +21,7 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) var _ = Describe("When testing Cluster API working on self-hosted clusters", func() { @@ -32,7 +32,7 @@ var _ = Describe("When testing Cluster API working on self-hosted clusters", fun BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InfrastructureProvider: pointer.String("docker"), + InfrastructureProvider: ptr.To("docker"), } }) }) @@ -46,9 +46,9 @@ var _ = Describe("When testing Cluster API working on self-hosted clusters using ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology", - InfrastructureProvider: pointer.String("docker"), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(1), + InfrastructureProvider: ptr.To("docker"), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), } }) }) @@ -62,9 +62,9 @@ var _ = Describe("When testing Cluster API working on self-hosted clusters using ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology", - InfrastructureProvider: pointer.String("docker"), - ControlPlaneMachineCount: pointer.Int64(3), - WorkerMachineCount: pointer.Int64(1), + InfrastructureProvider: ptr.To("docker"), + ControlPlaneMachineCount: ptr.To[int64](3), + WorkerMachineCount: ptr.To[int64](1), } }) }) @@ -78,9 +78,9 @@ var _ = Describe("When testing Cluster API working on single-node self-hosted cl ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology-single-node-cluster", - InfrastructureProvider: pointer.String("docker"), - ControlPlaneMachineCount: pointer.Int64(1), - WorkerMachineCount: pointer.Int64(0), + InfrastructureProvider: ptr.To("docker"), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](0), } }) }) diff --git a/test/framework/autoscaler_helpers.go b/test/framework/autoscaler_helpers.go index fcf430f52cbc..ba896e9c5c00 100644 --- 
a/test/framework/autoscaler_helpers.go +++ b/test/framework/autoscaler_helpers.go @@ -34,7 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -173,7 +173,7 @@ func AddScaleUpDeploymentAndWait(ctx context.Context, input AddScaleUpDeployment }, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32(int32(replicas)), + Replicas: ptr.To[int32](int32(replicas)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "scale-up", @@ -408,7 +408,7 @@ func getAuthenticationTokenForAutoscaler(ctx context.Context, managementClusterP tokenRequest := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - ExpirationSeconds: pointer.Int64(2 * 60 * 60), // 2 hours. + ExpirationSeconds: ptr.To[int64](2 * 60 * 60), // 2 hours. }, } tokenRequest, err := managementClusterProxy.GetClientSet().CoreV1().ServiceAccounts(namespace).CreateToken(ctx, name, tokenRequest, metav1.CreateOptions{}) diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go index 206fe2c19574..da7464b4da0b 100644 --- a/test/framework/clusterctl/e2e_config.go +++ b/test/framework/clusterctl/e2e_config.go @@ -32,7 +32,7 @@ import ( . "github.com/onsi/gomega" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/yaml" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" @@ -591,7 +591,7 @@ func (c *E2EConfig) GetInt64PtrVariable(varName string) *int64 { wCount, err := strconv.ParseInt(wCountStr, 10, 64) Expect(err).ToNot(HaveOccurred()) - return pointer.Int64(wCount) + return ptr.To[int64](wCount) } // GetInt32PtrVariable returns an Int32Ptr variable from the e2e config file. 
@@ -603,7 +603,7 @@ func (c *E2EConfig) GetInt32PtrVariable(varName string) *int32 { wCount, err := strconv.ParseUint(wCountStr, 10, 32) Expect(err).ToNot(HaveOccurred()) - return pointer.Int32(int32(wCount)) + return ptr.To[int32](int32(wCount)) } // GetProviderVersions returns the sorted list of versions defined for a provider. diff --git a/test/framework/controlplane_helpers.go b/test/framework/controlplane_helpers.go index 11809e29f30c..dcdbf85f4799 100644 --- a/test/framework/controlplane_helpers.go +++ b/test/framework/controlplane_helpers.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -410,8 +410,8 @@ func ScaleAndWaitControlPlane(ctx context.Context, input ScaleAndWaitControlPlan patchHelper, err := patch.NewHelper(input.ControlPlane, input.ClusterProxy.GetClient()) Expect(err).ToNot(HaveOccurred()) - scaleBefore := pointer.Int32Deref(input.ControlPlane.Spec.Replicas, 0) - input.ControlPlane.Spec.Replicas = pointer.Int32(input.Replicas) + scaleBefore := ptr.Deref(input.ControlPlane.Spec.Replicas, 0) + input.ControlPlane.Spec.Replicas = ptr.To[int32](input.Replicas) log.Logf("Scaling controlplane %s from %v to %v replicas", klog.KObj(input.ControlPlane), scaleBefore, input.Replicas) Eventually(func() error { return patchHelper.Patch(ctx, input.ControlPlane) diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go index 2bdfeba1932b..cae3b5e79768 100644 --- a/test/framework/deployment_helpers.go +++ b/test/framework/deployment_helpers.go @@ -45,7 +45,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" toolscache "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -464,7 +464,7 @@ func 
DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput) Namespace: input.Namespace, }, Spec: appsv1.DeploymentSpec{ - Replicas: pointer.Int32(4), + Replicas: ptr.To[int32](4), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "nonstop", diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index aa5b3b2e9921..4b1b6b0a6fba 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -141,7 +141,7 @@ func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMach Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling AssertMachineDeploymentFailureDomains") Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. 
input.MachineDeployment can't be nil when calling AssertMachineDeploymentFailureDomains") - machineDeploymentFD := pointer.StringDeref(input.MachineDeployment.Spec.Template.Spec.FailureDomain, "") + machineDeploymentFD := ptr.Deref(input.MachineDeployment.Spec.Template.Spec.FailureDomain, "") Byf("Checking all the machines controlled by %s are in the %q failure domain", input.MachineDeployment.Name, machineDeploymentFD) selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector) @@ -154,7 +154,7 @@ func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMach for i := range ms.Items { machineSet := ms.Items[i] - machineSetFD := pointer.StringDeref(machineSet.Spec.Template.Spec.FailureDomain, "") + machineSetFD := ptr.Deref(machineSet.Spec.Template.Spec.FailureDomain, "") Expect(machineSetFD).To(Equal(machineDeploymentFD), "MachineSet %s is in the %q failure domain, expecting %q", machineSet.Name, machineSetFD, machineDeploymentFD) selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) @@ -166,7 +166,7 @@ func AssertMachineDeploymentFailureDomains(ctx context.Context, input AssertMach }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list Machines for Cluster %s", klog.KObj(input.Cluster)) for _, machine := range machines.Items { - machineFD := pointer.StringDeref(machine.Spec.FailureDomain, "") + machineFD := ptr.Deref(machine.Spec.FailureDomain, "") Expect(machineFD).To(Equal(machineDeploymentFD), "Machine %s is in the %q failure domain, expecting %q", machine.Name, machineFD, machineDeploymentFD) } } @@ -466,7 +466,7 @@ func ScaleAndWaitMachineDeployment(ctx context.Context, input ScaleAndWaitMachin log.Logf("Scaling machine deployment %s from %d to %d replicas", klog.KObj(input.MachineDeployment), *input.MachineDeployment.Spec.Replicas, input.Replicas) patchHelper, err := patch.NewHelper(input.MachineDeployment, input.ClusterProxy.GetClient()) 
Expect(err).ToNot(HaveOccurred()) - input.MachineDeployment.Spec.Replicas = pointer.Int32(input.Replicas) + input.MachineDeployment.Spec.Replicas = ptr.To[int32](input.Replicas) Eventually(func() error { return patchHelper.Patch(ctx, input.MachineDeployment) }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine deployment %s", klog.KObj(input.MachineDeployment)) @@ -530,7 +530,7 @@ func ScaleAndWaitMachineDeploymentTopology(ctx context.Context, input ScaleAndWa } patchHelper, err := patch.NewHelper(input.Cluster, input.ClusterProxy.GetClient()) Expect(err).ToNot(HaveOccurred()) - mdTopology.Replicas = pointer.Int32(input.Replicas) + mdTopology.Replicas = ptr.To[int32](input.Replicas) input.Cluster.Spec.Topology.Workers.MachineDeployments[0] = mdTopology Eventually(func() error { return patchHelper.Patch(ctx, input.Cluster) diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go index 4b6b4ae0310f..910952fbc77a 100644 --- a/test/framework/ownerreference_helpers.go +++ b/test/framework/ownerreference_helpers.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -139,11 +139,11 @@ var ( machineHealthCheckKind = "MachineHealthCheck" clusterOwner = metav1.OwnerReference{Kind: clusterKind, APIVersion: coreGroupVersion} - clusterController = metav1.OwnerReference{Kind: clusterKind, APIVersion: coreGroupVersion, Controller: pointer.Bool(true)} + clusterController = metav1.OwnerReference{Kind: clusterKind, APIVersion: coreGroupVersion, Controller: ptr.To(true)} clusterClassOwner = metav1.OwnerReference{Kind: clusterClassKind, APIVersion: coreGroupVersion} - machineDeploymentController = metav1.OwnerReference{Kind: machineDeploymentKind, APIVersion: 
coreGroupVersion, Controller: pointer.Bool(true)} - machineSetController = metav1.OwnerReference{Kind: machineSetKind, APIVersion: coreGroupVersion, Controller: pointer.Bool(true)} - machineController = metav1.OwnerReference{Kind: machineKind, APIVersion: coreGroupVersion, Controller: pointer.Bool(true)} + machineDeploymentController = metav1.OwnerReference{Kind: machineDeploymentKind, APIVersion: coreGroupVersion, Controller: ptr.To(true)} + machineSetController = metav1.OwnerReference{Kind: machineSetKind, APIVersion: coreGroupVersion, Controller: ptr.To(true)} + machineController = metav1.OwnerReference{Kind: machineKind, APIVersion: coreGroupVersion, Controller: ptr.To(true)} ) // CoreOwnerReferenceAssertion maps Cluster API core types to functions which return an error if the passed @@ -187,7 +187,7 @@ var ( clusterResourceSetBindingKind = "ClusterResourceSetBinding" machinePoolKind = "MachinePool" - machinePoolController = metav1.OwnerReference{Kind: machinePoolKind, APIVersion: expv1.GroupVersion.String(), Controller: pointer.Bool(true)} + machinePoolController = metav1.OwnerReference{Kind: machinePoolKind, APIVersion: expv1.GroupVersion.String(), Controller: ptr.To(true)} clusterResourceSetOwner = metav1.OwnerReference{Kind: clusterResourceSetKind, APIVersion: addonsv1.GroupVersion.String()} ) @@ -239,7 +239,7 @@ var ( kubeadmControlPlaneGroupVersion = controlplanev1.GroupVersion.String() - kubeadmControlPlaneController = metav1.OwnerReference{Kind: kubeadmControlPlaneKind, APIVersion: kubeadmControlPlaneGroupVersion, Controller: pointer.Bool(true)} + kubeadmControlPlaneController = metav1.OwnerReference{Kind: kubeadmControlPlaneKind, APIVersion: kubeadmControlPlaneGroupVersion, Controller: ptr.To(true)} ) // KubeadmControlPlaneOwnerReferenceAssertions maps Kubeadm control plane types to functions which return an error if the passed @@ -263,7 +263,7 @@ var ( kubeadmConfigTemplateKind = "KubeadmConfigTemplate" kubeadmConfigGroupVersion = 
bootstrapv1.GroupVersion.String() - kubeadmConfigController = metav1.OwnerReference{Kind: kubeadmConfigKind, APIVersion: kubeadmConfigGroupVersion, Controller: pointer.Bool(true)} + kubeadmConfigController = metav1.OwnerReference{Kind: kubeadmConfigKind, APIVersion: kubeadmConfigGroupVersion, Controller: ptr.To(true)} ) // KubeadmBootstrapOwnerReferenceAssertions maps KubeadmBootstrap types to functions which return an error if the passed OwnerReferences diff --git a/test/go.mod b/test/go.mod index 6e578861fb71..dbac99e8262d 100644 --- a/test/go.mod +++ b/test/go.mod @@ -30,7 +30,7 @@ require ( k8s.io/client-go v0.28.4 k8s.io/component-base v0.28.4 k8s.io/klog/v2 v2.100.1 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 + k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/kind v0.20.0 diff --git a/test/go.sum b/test/go.sum index 6868628edd84..a893fe217a80 100644 --- a/test/go.sum +++ b/test/go.sum @@ -488,8 +488,8 @@ k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg= k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod 
h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= diff --git a/test/infrastructure/container/docker.go b/test/infrastructure/container/docker.go index f2de71416eae..fc5428a97207 100644 --- a/test/infrastructure/container/docker.go +++ b/test/infrastructure/container/docker.go @@ -38,7 +38,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/pkg/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/kind" @@ -406,7 +406,7 @@ func (d *dockerRuntime) RunContainer(ctx context.Context, runConfig *RunContaine Tmpfs: runConfig.Tmpfs, PortBindings: nat.PortMap{}, RestartPolicy: dockercontainer.RestartPolicy{Name: restartPolicy, MaximumRetryCount: restartMaximumRetryCount}, - Init: pointer.Bool(false), + Init: ptr.To(false), } networkConfig := network.NetworkingConfig{} diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index 3d72b64fed2a..a39046f503a3 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -30,7 +30,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -271,7 +271,7 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust } if machinePool.Spec.Replicas == nil { - machinePool.Spec.Replicas = pointer.Int32(1) + machinePool.Spec.Replicas = ptr.To[int32](1) } // First, reconcile the Docker containers, but do not 
delete any as we need to delete the Machine to ensure node cordon/drain. diff --git a/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go b/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go index cc51e96cbeb9..a39d3a880af3 100644 --- a/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go +++ b/test/infrastructure/docker/internal/webhooks/dockermachinetemplate_webhook_test.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/gomega" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -67,42 +67,42 @@ func TestDockerMachineTemplateInvalid(t *testing.T) { name: "return no error if no modification", newTemplate: newTemplate, oldTemplate: newTemplate, - req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(false)}}, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(false)}}, wantError: false, }, { name: "don't allow modification", newTemplate: newTemplate, oldTemplate: &oldTemplate, - req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(false)}}, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(false)}}, wantError: true, }, { name: "don't allow modification, skip immutability annotation set", newTemplate: newTemplateSkipImmutabilityAnnotationSet, oldTemplate: &oldTemplate, - req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(false)}}, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(false)}}, wantError: true, }, { name: "don't allow modification, dry run, no skip immutability annotation set", newTemplate: newTemplate, oldTemplate: &oldTemplate, - req: 
&admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, wantError: true, }, { name: "skip immutability check", newTemplate: newTemplateSkipImmutabilityAnnotationSet, oldTemplate: &oldTemplate, - req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, wantError: false, }, { name: "don't allow invalid metadata", newTemplate: newTemplateWithInvalidMetadata, oldTemplate: newTemplate, - req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: &admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, wantError: true, }, } diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go index c7920c47d770..593e7737b5f3 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go @@ -34,7 +34,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -290,7 +290,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalCloudMachine(ctx context.Cont // TODO: consider if to surface VM provisioned also on the cloud machine (currently it surfaces only on the inMemoryMachine) - inMemoryMachine.Spec.ProviderID = pointer.String(calculateProviderID(inMemoryMachine)) + inMemoryMachine.Spec.ProviderID = ptr.To(calculateProviderID(inMemoryMachine)) inMemoryMachine.Status.Ready = 
true conditions.MarkTrue(inMemoryMachine, infrav1.VMProvisionedCondition) return ctrl.Result{}, nil diff --git a/util/collections/machine_collection_test.go b/util/collections/machine_collection_test.go index e3623263d9e5..df6b14c10b82 100644 --- a/util/collections/machine_collection_test.go +++ b/util/collections/machine_collection_test.go @@ -22,7 +22,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" @@ -86,51 +86,51 @@ func TestMachinesLowestVersion(t *testing.T) { machines: func() collections.Machines { machines := collections.New() machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.20"), + Version: ptr.To("1.20"), }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.19.8"), + Version: ptr.To("1.19.8"), }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String(""), + Version: ptr.To(""), }}) return machines }(), - expected: pointer.String("1.19.8"), + expected: ptr.To("1.19.8"), }, { name: "return lowest version from machines with pre release versions", machines: func() collections.Machines { machines := collections.New() machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.20.1"), + Version: ptr.To("1.20.1"), }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.20.1-alpha.1"), + Version: ptr.To("1.20.1-alpha.1"), }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ - Version: 
pointer.String(""), + Version: ptr.To(""), }}) return machines }(), - expected: pointer.String("1.20.1-alpha.1"), + expected: ptr.To("1.20.1-alpha.1"), }, { name: "return lowest version from machines with build identifier versions", machines: func() collections.Machines { machines := collections.New() machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.20.1+xyz.2"), + Version: ptr.To("1.20.1+xyz.2"), }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.20.1+xyz.1"), + Version: ptr.To("1.20.1+xyz.1"), }}) machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ - Version: pointer.String(""), + Version: ptr.To(""), }}) return machines }(), - expected: pointer.String("1.20.1+xyz.1"), + expected: ptr.To("1.20.1+xyz.1"), }, } diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index afca7f7d4635..5665e26802bd 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -22,7 +22,7 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -179,13 +179,13 @@ func TestShouldRolloutBeforeCertificatesExpire(t *testing.T) { }) t.Run("if machine is nil it should return false", func(t *testing.T) { g := NewWithT(t) - rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: pointer.Int32(10)} + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(nil)).To(BeFalse()) }) t.Run("if the machine certificate expiry information is not available it should return false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: pointer.Int32(10)} + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(m)).To(BeFalse()) }) t.Run("if the machine certificates are not going to expire within the expiry time it should return false", func(t *testing.T) { @@ -196,7 +196,7 @@ func TestShouldRolloutBeforeCertificatesExpire(t *testing.T) { CertificatesExpiryDate: &metav1.Time{Time: certificateExpiryTime}, }, } - rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: pointer.Int32(10)} + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(m)).To(BeFalse()) }) t.Run("if machine certificates will expire within the expiry time then it should return true", func(t *testing.T) { @@ -207,7 +207,7 @@ func TestShouldRolloutBeforeCertificatesExpire(t *testing.T) { CertificatesExpiryDate: &metav1.Time{Time: certificateExpiryTime}, }, } - rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: pointer.Int32(10)} + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: 
ptr.To[int32](10)} g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(m)).To(BeTrue()) }) } @@ -235,27 +235,27 @@ func TestHashAnnotationKey(t *testing.T) { func TestInFailureDomain(t *testing.T) { t.Run("nil machine returns false", func(t *testing.T) { g := NewWithT(t) - g.Expect(collections.InFailureDomains(pointer.String("test"))(nil)).To(BeFalse()) + g.Expect(collections.InFailureDomains(ptr.To("test"))(nil)).To(BeFalse()) }) t.Run("machine with given failure domain returns true", func(t *testing.T) { g := NewWithT(t) - m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: pointer.String("test")}} - g.Expect(collections.InFailureDomains(pointer.String("test"))(m)).To(BeTrue()) + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("test")}} + g.Expect(collections.InFailureDomains(ptr.To("test"))(m)).To(BeTrue()) }) t.Run("machine with a different failure domain returns false", func(t *testing.T) { g := NewWithT(t) - m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: pointer.String("notTest")}} + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("notTest")}} g.Expect(collections.InFailureDomains( - pointer.String("test"), - pointer.String("test2"), - pointer.String("test3"), + ptr.To("test"), + ptr.To("test2"), + ptr.To("test3"), nil, - pointer.String("foo"))(m)).To(BeFalse()) + ptr.To("foo"))(m)).To(BeFalse()) }) t.Run("machine without failure domain returns false", func(t *testing.T) { g := NewWithT(t) m := &clusterv1.Machine{} - g.Expect(collections.InFailureDomains(pointer.String("test"))(m)).To(BeFalse()) + g.Expect(collections.InFailureDomains(ptr.To("test"))(m)).To(BeFalse()) }) t.Run("machine without failure domain returns true, when nil used for failure domain", func(t *testing.T) { g := NewWithT(t) @@ -264,8 +264,8 @@ func TestInFailureDomain(t *testing.T) { }) t.Run("machine with failure domain returns true, when one of multiple failure domains match", func(t 
*testing.T) { g := NewWithT(t) - m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: pointer.String("test")}} - g.Expect(collections.InFailureDomains(pointer.String("foo"), pointer.String("test"))(m)).To(BeTrue()) + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("test")}} + g.Expect(collections.InFailureDomains(ptr.To("foo"), ptr.To("test"))(m)).To(BeTrue()) }) } @@ -350,7 +350,7 @@ func TestWithVersion(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String(""), + Version: ptr.To(""), }, } g.Expect(collections.WithVersion()(machine)).To(BeFalse()) @@ -360,7 +360,7 @@ func TestWithVersion(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("1..20"), + Version: ptr.To("1..20"), }, } g.Expect(collections.WithVersion()(machine)).To(BeFalse()) @@ -370,7 +370,7 @@ func TestWithVersion(t *testing.T) { g := NewWithT(t) machine := &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("1.20"), + Version: ptr.To("1.20"), }, } g.Expect(collections.WithVersion()(machine)).To(BeTrue()) diff --git a/util/failuredomains/failure_domains.go b/util/failuredomains/failure_domains.go index ee629f008801..6ef83615a0c3 100644 --- a/util/failuredomains/failure_domains.go +++ b/util/failuredomains/failure_domains.go @@ -21,7 +21,7 @@ import ( "sort" "k8s.io/klog/v2/klogr" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" @@ -83,7 +83,7 @@ func PickFewest(failureDomains clusterv1.FailureDomains, machines collections.Ma return nil } sort.Sort(aggregations) - return pointer.String(aggregations[0].id) + return ptr.To(aggregations[0].id) } func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations { diff --git a/util/failuredomains/failure_domains_test.go 
b/util/failuredomains/failure_domains_test.go index 155632238074..12a4eb7cdc3b 100644 --- a/util/failuredomains/failure_domains_test.go +++ b/util/failuredomains/failure_domains_test.go @@ -20,15 +20,15 @@ import ( "testing" . "github.com/onsi/gomega" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/collections" ) func TestNewFailureDomainPicker(t *testing.T) { - a := pointer.String("us-west-1a") - b := pointer.String("us-west-1b") + a := ptr.To("us-west-1a") + b := ptr.To("us-west-1b") fds := clusterv1.FailureDomains{ *a: clusterv1.FailureDomainSpec{}, @@ -98,8 +98,8 @@ func TestNewFailureDomainPicker(t *testing.T) { } func TestNewFailureDomainPickMost(t *testing.T) { - a := pointer.String("us-west-1a") - b := pointer.String("us-west-1b") + a := ptr.To("us-west-1a") + b := ptr.To("us-west-1b") fds := clusterv1.FailureDomains{ *a: clusterv1.FailureDomainSpec{ControlPlane: true}, diff --git a/util/patch/patch_test.go b/util/patch/patch_test.go index b84d6e0acb13..269167a18b91 100644 --- a/util/patch/patch_test.go +++ b/util/patch/patch_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -762,7 +762,7 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Updating the object spec") - obj.Spec.Replicas = pointer.Int32(10) + obj.Spec.Replicas = ptr.To[int32](10) t.Log("Patching the object") g.Expect(patcher.Patch(ctx, obj, WithStatusObservedGeneration{})).To(Succeed()) @@ -802,7 +802,7 @@ func TestPatchHelper(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) t.Log("Updating the object spec") - obj.Spec.Replicas = pointer.Int32(10) + obj.Spec.Replicas = ptr.To[int32](10) t.Log("Updating the object status") 
obj.Status.AvailableReplicas = 6 diff --git a/util/topology/topology_test.go b/util/topology/topology_test.go index b4cc702f53a8..0234cedb81c7 100644 --- a/util/topology/topology_test.go +++ b/util/topology/topology_test.go @@ -23,7 +23,7 @@ import ( admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -44,25 +44,25 @@ func TestShouldSkipImmutabilityChecks(t *testing.T) { }, { name: "false - dryRun pointer is false", - req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(false)}}, + req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(false)}}, obj: nil, want: false, }, { name: "false - nil obj", - req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, obj: nil, want: false, }, { name: "false - no annotations", - req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, obj: &unstructured.Unstructured{}, want: false, }, { name: "false - annotation not set", - req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, obj: &unstructured.Unstructured{Object: map[string]interface{}{ "metadata": map[string]interface{}{ "annotations": map[string]interface{}{}, @@ -72,7 +72,7 @@ func TestShouldSkipImmutabilityChecks(t *testing.T) { }, { name: "true", - req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: pointer.Bool(true)}}, + req: 
admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{DryRun: ptr.To(true)}}, obj: &unstructured.Unstructured{Object: map[string]interface{}{ "metadata": map[string]interface{}{ "annotations": map[string]interface{}{