diff --git a/bootstrap/kubeadm/config/manager/manager.yaml b/bootstrap/kubeadm/config/manager/manager.yaml
index dae2a9b6c615..b06c177c478f 100644
--- a/bootstrap/kubeadm/config/manager/manager.yaml
+++ b/bootstrap/kubeadm/config/manager/manager.yaml
@@ -22,7 +22,7 @@ spec:
         - "--leader-elect"
         - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}"
         - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}"
-        - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}"
+        - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}"
         - "--bootstrap-token-ttl=${KUBEADM_BOOTSTRAP_TOKEN_TTL:=15m}"
         image: controller:latest
         name: manager
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 37c4ae3cbc1b..292272b0b034 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -23,7 +23,7 @@ spec:
         - "--leader-elect"
         - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}"
         - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}"
-        - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=false},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=false}"
+        - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=false},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=false}"
         image: controller:latest
         name: manager
         env:
diff --git a/controlplane/kubeadm/config/rbac/role.yaml b/controlplane/kubeadm/config/rbac/role.yaml
index 8bae8c0a19f5..c79787ee7d5e 100644
--- a/controlplane/kubeadm/config/rbac/role.yaml
+++ b/controlplane/kubeadm/config/rbac/role.yaml
@@ -47,6 +47,12 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - cluster.x-k8s.io
+  resources:
+  - machinepools
+  verbs:
+  - list
 - apiGroups:
   - cluster.x-k8s.io
   resources:
diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index d58125fc9972..22ba4f0ae2a1 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -67,6 +67,7 @@ const (
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io;controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=list
 // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch
 
 // KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object.
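(Editor's aside, not part of the diff: the new `machinepools` list permission and kubebuilder marker above allow reads along the following lines. This is only a sketch — the helper name `listMachinePools` and its placement are assumptions, not code from this change; only the controller-runtime client and the `expv1`/`clusterv1` packages already imported elsewhere in this diff are used.)

```go
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listMachinePools is a hypothetical helper (not taken from this change): it
// lists the MachinePools that belong to a Cluster, i.e. the kind of read the
// new `machinepools: list` RBAC rule permits for the KCP manager.
// The client's scheme must know the MachinePool types, which is what the new
// expv1.AddToScheme call in controlplane/kubeadm/main.go registers.
func listMachinePools(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) {
	mpList := &expv1.MachinePoolList{}
	if err := c.List(ctx, mpList,
		client.InNamespace(cluster.Namespace),
		client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name},
	); err != nil {
		return nil, err
	}
	return mpList, nil
}
```

Scoping the list by the `cluster.x-k8s.io/cluster-name` label mirrors how other Cluster API controllers restrict reads to a single Cluster.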
diff --git a/controlplane/kubeadm/main.go b/controlplane/kubeadm/main.go
index 8e8646b5c297..cbb91cd1364d 100644
--- a/controlplane/kubeadm/main.go
+++ b/controlplane/kubeadm/main.go
@@ -52,6 +52,7 @@ import (
 	kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/controllers"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
 	kcpwebhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/webhooks"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/feature"
 	controlplanev1alpha3 "sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha3"
 	controlplanev1alpha4 "sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha4"
@@ -92,6 +93,7 @@ var (
 func init() {
 	_ = clientgoscheme.AddToScheme(scheme)
 	_ = clusterv1.AddToScheme(scheme)
+	_ = expv1.AddToScheme(scheme)
 	_ = controlplanev1alpha3.AddToScheme(scheme)
 	_ = controlplanev1alpha4.AddToScheme(scheme)
 	_ = controlplanev1.AddToScheme(scheme)
diff --git a/docs/book/src/developer/testing.md b/docs/book/src/developer/testing.md
index 961a9ec18fef..42938ef52340 100644
--- a/docs/book/src/developer/testing.md
+++ b/docs/book/src/developer/testing.md
@@ -27,8 +27,8 @@ if this is not possible, a viable solution is to use mocks (e.g CAPA).
 ### Generic providers
 
 When writing tests core Cluster API contributors should ensure that the code works with any providers, and thus it is required
-to not use any specific provider implementation. Instead, the so-called generic providers e.g. "GenericInfrastructureCluster" 
-should be used because they implement the plain Cluster API contract. This prevents tests from relying on assumptions that 
+to not use any specific provider implementation. Instead, the so-called generic providers e.g. "GenericInfrastructureCluster"
+should be used because they implement the plain Cluster API contract. This prevents tests from relying on assumptions that
 may not hold true in all cases.
 
 Please note that in the long term we would like to improve the implementation of generic providers, centralizing
@@ -46,11 +46,11 @@ the test cluster.
 With this approach it is possible to interact with Cluster API almost like in a real environment, by creating/updating
 Kubernetes objects and waiting for the controllers to take action.
 See the [quick reference](#quick-reference) below for more details.
-Also in case of integration tests, considerations about [mocking external APIs](#mocking-external-apis) and usage of [generic providers](#generic-providers) apply. 
+Also in case of integration tests, considerations about [mocking external APIs](#mocking-external-apis) and usage of [generic providers](#generic-providers) apply.
 
 ## Fuzzing tests
 
-Fuzzing tests automatically inject randomly generated inputs, often invalid or with unexpected values, into functions to discover vulnerabilities. 
+Fuzzing tests automatically inject randomly generated inputs, often invalid or with unexpected values, into functions to discover vulnerabilities.
 
 Two different types of fuzzing are currently being used on the Cluster API repository:
@@ -78,7 +78,7 @@ In light of continuing improving our practice around this ambitious goal, we are
 
 Each contribution in growing this set of utilities or their adoption across the codebase is more than welcome!
 
-Another consideration that can help in improving test maintainability is the idea of testing "by layers"; this idea could 
+Another consideration that can help in improving test maintainability is the idea of testing "by layers"; this idea could
 apply whenever we are testing "higher-level" functions that internally uses one or more "lower-level" functions;
 in order to avoid writing/maintaining redundant tests, whenever possible contributors should take care of testing
 _only_ the logic that is implemented in the "higher-level" function, delegating the test function called internally
@@ -243,27 +243,26 @@ Execute the run configuration with `Debug`.
 
 Tips
 
-The e2e tests create a new management cluster with kind on each run. To avoid this and speed up the test execution the tests can 
+The e2e tests create a new management cluster with kind on each run. To avoid this and speed up the test execution the tests can
 also be run against a management cluster created by [tilt](./tilt.md):
 ```bash
 # Create a kind cluster
 ./hack/kind-install-for-capd.sh
 # Set up the management cluster via tilt
-tilt up 
+tilt up
 ```
 Now you can start the e2e test via IDE as described above but with the additional `-e2e.use-existing-cluster=true` flag.
 
 **Note**: This can also be used to debug controllers during e2e tests as described in [Developing Cluster API with Tilt](./tilt.md#wiring-up-debuggers).
 
-The e2e tests also create a local clusterctl repository. After it has been created on a first test execution this step can also be 
-skipped by setting `-e2e.clusterctl-config=/repository/clusterctl-config.yaml`. This also works with a clusterctl repository created 
+The e2e tests also create a local clusterctl repository. After it has been created on a first test execution this step can also be
+skipped by setting `-e2e.clusterctl-config=/repository/clusterctl-config.yaml`. This also works with a clusterctl repository created
 via [Create the local repository](http://localhost:3000/clusterctl/developers.html#create-the-local-repository).
 
 **Feature gates**: E2E tests often use features which need to be enabled first. Make sure to enable the feature gates in the tilt settings file:
 ```yaml
 kustomize_substitutions:
   CLUSTER_TOPOLOGY: "true"
-  EXP_MACHINE_POOL: "true"
   EXP_CLUSTER_RESOURCE_SET: "true"
   EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true"
   EXP_RUNTIME_SDK: "true"
@@ -352,7 +351,7 @@ As alternative to loki, JSON logs can be visualized with a human readable timestamp
 
 The `(. | tostring)` part could also be customized to only output parts of the JSON logline. E.g.:
- 
+
 * `(.err)` to only output the error message part.
 * `(.msg)` to only output the message part.
 * `(.controller + " " + .msg)` to output the controller name and message part.
@@ -405,7 +404,7 @@ func TestMain(m *testing.M) {
 ```
 
 Most notably, [envtest] provides not only a real API server to use during testing, but it offers the opportunity
-to configure one or more controllers to run against the test cluster, as well as creating informers index. 
+to configure one or more controllers to run against the test cluster, as well as creating informers index.
 
 ```golang
 func TestMain(m *testing.M) {
@@ -423,7 +422,7 @@ func TestMain(m *testing.M) {
 	if err := index.AddDefaultIndexes(ctx, mgr); err != nil {
 		panic(fmt.Sprintf("unable to setup index: %v", err))
 	}
-	
+
 	// Run tests
 	...
 }
@@ -440,8 +439,8 @@ should take care in ensuring each test runs in isolation from the others, by:
 - Avoiding object name conflict.
 
 Developers should also be aware of the fact that the informers cache used to access the [envtest]
-depends on actual etcd watches/API calls for updates, and thus it could happen that after creating 
-or deleting objects the cache takes a few milliseconds to get updated. This can lead to test flakes, 
+depends on actual etcd watches/API calls for updates, and thus it could happen that after creating
+or deleting objects the cache takes a few milliseconds to get updated. This can lead to test flakes,
 and thus it always recommended to use patterns like create and wait or delete and wait; Cluster API env test
 provides a set of utils for this scope.
@@ -530,7 +529,7 @@ comes with a set of limitations that could hamper the validity of a test, most notably:
   of the test objects.
 - the [fakeclient] does not use a cache based on informers/API calls/etcd watches, so the test written in this way
   can't help in surfacing race conditions related to how those components behave in real cluster.
-- there is no support for cache index/operations using cache indexes. 
+- there is no support for cache index/operations using cache indexes.
 
 Accordingly, using [fakeclient] is not suitable for all the use cases, so in some cases contributors will be required
 to use [envtest] instead. In case of doubts about which one to use when writing tests, don't hesitate to ask for
diff --git a/docs/book/src/developer/tilt.md b/docs/book/src/developer/tilt.md
index 3dec91b63380..40f1c6a777fa 100644
--- a/docs/book/src/developer/tilt.md
+++ b/docs/book/src/developer/tilt.md
@@ -46,7 +46,7 @@ enable_providers:
 - kubeadm-control-plane
 ```
 
-To use tilt to launch a provider with its own repo, using Cluster API Provider AWS here, `tilt-settings.yaml` should look like: 
+To use tilt to launch a provider with its own repo, using Cluster API Provider AWS here, `tilt-settings.yaml` should look like:
 
 ```yaml
 default_registry: gcr.io/your-project-name-here
@@ -108,7 +108,6 @@ provider's yaml. These substitutions are also used when deploying cluster templates.
 ```yaml
 kustomize_substitutions:
   CLUSTER_TOPOLOGY: "true"
-  EXP_MACHINE_POOL: "true"
   EXP_CLUSTER_RESOURCE_SET: "true"
   EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true"
   EXP_RUNTIME_SDK: "true"
@@ -409,7 +408,7 @@ build it.
 **live_reload_deps**: a list of files/directories to watch. If any of them changes, Tilt rebuilds the manager binary
 for the provider and performs a live update of the running container.
 
-**version**: allows to define the version to be used for the Provider CR. If empty, a default version will 
+**version**: allows to define the version to be used for the Provider CR. If empty, a default version will
 be used.
 
 **additional_docker_helper_commands** (String, default=""): Additional commands to be run in the helper image
diff --git a/docs/book/src/tasks/experimental-features/experimental-features.md b/docs/book/src/tasks/experimental-features/experimental-features.md
index 6144eced25c9..987d604acd7c 100644
--- a/docs/book/src/tasks/experimental-features/experimental-features.md
+++ b/docs/book/src/tasks/experimental-features/experimental-features.md
@@ -31,7 +31,6 @@ One way is to set experimental variables on the clusterctl config file.
 For CAPI
 ```yaml
 variables:
   EXP_CLUSTER_RESOURCE_SET: "true"
-  EXP_MACHINE_POOL: "true"
   CLUSTER_TOPOLOGY: "true"
   EXP_RUNTIME_SDK: "true"
   EXP_MACHINE_SET_PREFLIGHT_CHECKS: "true"
@@ -46,7 +45,6 @@ On development environments started with `Tilt`, features can be enabled by setting
 ```yaml
 kustomize_substitutions:
   EXP_CLUSTER_RESOURCE_SET: 'true'
-  EXP_MACHINE_POOL: 'true'
   CLUSTER_TOPOLOGY: 'true'
   EXP_RUNTIME_SDK: 'true'
   EXP_MACHINE_SET_PREFLIGHT_CHECKS: 'true'
diff --git a/docs/book/src/tasks/experimental-features/machine-pools.md b/docs/book/src/tasks/experimental-features/machine-pools.md
index 521550e2922b..84c15bfc21ec 100644
--- a/docs/book/src/tasks/experimental-features/machine-pools.md
+++ b/docs/book/src/tasks/experimental-features/machine-pools.md
@@ -1,4 +1,4 @@
-# Experimental Feature: MachinePool (alpha)
+# Experimental Feature: MachinePool (beta)
 
 The `MachinePool` feature provides a way to manage a set of machines by defining a common configuration, number of
 desired machine replicas etc. similar to `MachineDeployment`, except `MachineSet` controllers are responsible for the lifecycle management of the machines for `MachineDeployment`, whereas in `MachinePools`,
diff --git a/docs/book/src/user/quick-start.md b/docs/book/src/user/quick-start.md
index 075dda19eccc..90576bb94b93 100644
--- a/docs/book/src/user/quick-start.md
+++ b/docs/book/src/user/quick-start.md
@@ -539,9 +539,6 @@ for the upgrade from v1.23 to v1.24 as we have to use different cgroupDrivers depending
 # Enable the experimental Cluster topology feature.
 export CLUSTER_TOPOLOGY=true
 
-# Enable the experimental Machine Pool feature
-export EXP_MACHINE_POOL=true
-
 # Initialize the management cluster
 clusterctl init --infrastructure docker
 ```
diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go
index 425daf37fdb3..1e4e3d64a74a 100644
--- a/exp/internal/controllers/machinepool_controller_phases_test.go
+++ b/exp/internal/controllers/machinepool_controller_phases_test.go
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
-	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -40,7 +39,6 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/controllers/remote"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
 	"sigs.k8s.io/cluster-api/internal/util/ssa"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
@@ -1236,10 +1234,6 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) {
 
 func TestReconcileMachinePoolMachines(t *testing.T) {
 	t.Run("Reconcile MachinePool Machines", func(t *testing.T) {
-		// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
-		// Enabling the feature flag temporarily for this test.
-		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
-
 		g := NewWithT(t)
 
 		ns, err := env.CreateNamespace(ctx, "test-machinepool-machines")
diff --git a/exp/internal/webhooks/machinepool_test.go b/exp/internal/webhooks/machinepool_test.go
index ecda5dadcd42..583b069383c3 100644
--- a/exp/internal/webhooks/machinepool_test.go
+++ b/exp/internal/webhooks/machinepool_test.go
@@ -23,23 +23,17 @@ import (
 	. "github.com/onsi/gomega"
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/webhooks/util" ) var ctx = ctrl.SetupSignalHandler() func TestMachinePoolDefault(t *testing.T) { - // NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool. - // Enabling the feature flag temporarily for this test. - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() - g := NewWithT(t) mp := &expv1.MachinePool{ @@ -68,9 +62,6 @@ func TestMachinePoolDefault(t *testing.T) { } func TestMachinePoolBootstrapValidation(t *testing.T) { - // NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool. - // Enabling the feature flag temporarily for this test. - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() tests := []struct { name string bootstrap clusterv1.Bootstrap @@ -127,9 +118,6 @@ func TestMachinePoolBootstrapValidation(t *testing.T) { } func TestMachinePoolNamespaceValidation(t *testing.T) { - // NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool. - // Enabling the feature flag temporarily for this test. - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() tests := []struct { name string expectErr bool @@ -204,9 +192,6 @@ func TestMachinePoolNamespaceValidation(t *testing.T) { } func TestMachinePoolClusterNameImmutable(t *testing.T) { - // NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool. - // Enabling the feature flag temporarily for this test. - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() tests := []struct { name string oldClusterName string @@ -266,9 +251,6 @@ func TestMachinePoolClusterNameImmutable(t *testing.T) { } func TestMachinePoolVersionValidation(t *testing.T) { - // NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool. - // Enabling the feature flag temporarily for this test. - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() tests := []struct { name string expectErr bool diff --git a/feature/feature.go b/feature/feature.go index ff2bb33dc4bf..b44e0bc3d6b9 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -70,7 +70,7 @@ func init() { // To add a new feature, define a key for it above and add it here. 
 var defaultClusterAPIFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	// Every feature should be initiated here:
-	MachinePool:                    {Default: false, PreRelease: featuregate.Alpha},
+	MachinePool:                    {Default: true, PreRelease: featuregate.Beta},
 	ClusterResourceSet:             {Default: true, PreRelease: featuregate.Beta},
 	ClusterTopology:                {Default: false, PreRelease: featuregate.Alpha},
 	KubeadmBootstrapFormatIgnition: {Default: false, PreRelease: featuregate.Alpha},
diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go
index 3f2013b810f3..b1be769fa553 100644
--- a/internal/controllers/topology/cluster/cluster_controller_test.go
+++ b/internal/controllers/topology/cluster/cluster_controller_test.go
@@ -64,7 +64,6 @@ var (
 
 func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 	g := NewWithT(t)
 	timeout := 5 * time.Second
 
@@ -113,7 +112,6 @@ func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {
 
 func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 	g := NewWithT(t)
 	timeout := 5 * time.Second
 
@@ -166,7 +164,6 @@ func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {
 
 func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 	g := NewWithT(t)
 	timeout := 300 * time.Second
 
@@ -258,7 +255,6 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
 
 func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 	g := NewWithT(t)
 	timeout := 5 * time.Second
 
@@ -359,7 +355,6 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {
 
 func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 	g := NewWithT(t)
 	timeout := 30 * time.Second
 
@@ -441,7 +436,6 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {
 
 func TestClusterReconciler_reconcileDelete(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 
 	catalog := runtimecatalog.New()
 	_ = runtimehooksv1.AddToCatalog(catalog)
@@ -595,7 +589,6 @@ func TestClusterReconciler_reconcileDelete(t *testing.T) {
 // In this case deletion of the ClusterClass should be blocked by the webhook.
 func TestClusterReconciler_deleteClusterClass(t *testing.T) {
 	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
 	g := NewWithT(t)
 	timeout := 5 * time.Second
diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go
index 7dffa9f535be..5ea155aca065 100644
--- a/internal/controllers/topology/cluster/reconcile_state_test.go
+++ b/internal/controllers/topology/cluster/reconcile_state_test.go
@@ -33,7 +33,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -45,7 +44,6 @@ import (
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
-	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
@@ -2196,8 +2194,6 @@ func TestReconcileMachineDeployments(t *testing.T) {
 }
 
 func TestReconcileMachinePools(t *testing.T) {
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
-
 	g := NewWithT(t)
 
 	infrastructureMachinePool1 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-1").Build()
diff --git a/test/infrastructure/docker/config/manager/manager.yaml b/test/infrastructure/docker/config/manager/manager.yaml
index 56fc71bb9239..29c2661bbca9 100644
--- a/test/infrastructure/docker/config/manager/manager.yaml
+++ b/test/infrastructure/docker/config/manager/manager.yaml
@@ -20,7 +20,7 @@ spec:
         - "--leader-elect"
         - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}"
         - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}"
-        - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},ClusterTopology=${CLUSTER_TOPOLOGY:=false}"
+        - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterTopology=${CLUSTER_TOPOLOGY:=false}"
         image: controller:latest
         name: manager
         env:
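(Editor's aside, not part of the diff: with the default flipped to `true` in `feature/feature.go`, the gate check already used throughout the codebase now takes the MachinePool path unless the manager is started with `--feature-gates=MachinePool=false`, i.e. `EXP_MACHINE_POOL=false` in the manifests above. A minimal sketch of that check, assuming only the `feature` package touched in this diff; the wrapper function is illustrative, not code from this change.)

```go
package example

import "sigs.k8s.io/cluster-api/feature"

// machinePoolEnabled wraps the gate check used across Cluster API controllers
// and webhooks. After this change it returns true by default; passing
// --feature-gates=MachinePool=false (EXP_MACHINE_POOL=false) turns it off again.
func machinePoolEnabled() bool {
	return feature.Gates.Enabled(feature.MachinePool)
}
```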