Set MachinePool feature flag to true + Beta
mboersma committed Mar 15, 2024
1 parent 15615f6 commit e961abe
Showing 16 changed files with 31 additions and 64 deletions.
2 changes: 1 addition & 1 deletion bootstrap/kubeadm/config/manager/manager.yaml
@@ -22,7 +22,7 @@ spec:
- "--leader-elect"
- "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}"
- "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}"
- "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}"
- "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},KubeadmBootstrapFormatIgnition=${EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION:=false}"
- "--bootstrap-token-ttl=${KUBEADM_BOOTSTRAP_TOKEN_TTL:=15m}"
image: controller:latest
name: manager
2 changes: 1 addition & 1 deletion config/manager/manager.yaml
@@ -23,7 +23,7 @@ spec:
- "--leader-elect"
- "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}"
- "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}"
- "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=false},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=false}"
- "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=true},ClusterResourceSet=${EXP_CLUSTER_RESOURCE_SET:=false},ClusterTopology=${CLUSTER_TOPOLOGY:=false},RuntimeSDK=${EXP_RUNTIME_SDK:=false},MachineSetPreflightChecks=${EXP_MACHINE_SET_PREFLIGHT_CHECKS:=false}"
image: controller:latest
name: manager
env:
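With the default flipped, the substituted flag value becomes `MachinePool=true` unless `EXP_MACHINE_POOL` is explicitly set to `false`. A minimal sketch (not part of this commit) of how a `--feature-gates` string like the one above is interpreted, using the upstream `k8s.io/component-base/featuregate` package the managers rely on:

```go
package main

import (
    "fmt"

    "k8s.io/component-base/featuregate"
)

func main() {
    // Register a gate the same way the manager does, now defaulting to true.
    gates := featuregate.NewFeatureGate()
    _ = gates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        "MachinePool": {Default: true, PreRelease: featuregate.Beta},
    })

    // This mirrors parsing the flag value after variable substitution,
    // e.g. "--feature-gates=MachinePool=true,...".
    if err := gates.Set("MachinePool=true"); err != nil {
        panic(err)
    }

    fmt.Println("MachinePool enabled:", gates.Enabled("MachinePool"))
}
```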
6 changes: 6 additions & 0 deletions controlplane/kubeadm/config/rbac/role.yaml
@@ -47,6 +47,12 @@ rules:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
- machinepools
verbs:
- list
- apiGroups:
- cluster.x-k8s.io
resources:
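The new rule grants the KubeadmControlPlane manager read ("list") access to MachinePools in the cluster.x-k8s.io group. A hedged sketch of the kind of call this permits; the helper name and wiring are illustrative, not from this commit:

```go
package example

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/client"

    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// listMachinePools is a hypothetical helper: with "list" granted on
// cluster.x-k8s.io/machinepools, a controller can enumerate the MachinePools
// in a cluster's namespace (for example, to reason about worker machines it
// does not own directly).
func listMachinePools(ctx context.Context, c client.Client, namespace string) (*expv1.MachinePoolList, error) {
    machinePools := &expv1.MachinePoolList{}
    if err := c.List(ctx, machinePools, client.InNamespace(namespace)); err != nil {
        return nil, err
    }
    return machinePools, nil
}
```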
1 change: 1 addition & 0 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -67,6 +67,7 @@ const (
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io;controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=list
// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch

// KubeadmControlPlaneReconciler reconciles a KubeadmControlPlane object.
2 changes: 2 additions & 0 deletions controlplane/kubeadm/main.go
@@ -52,6 +52,7 @@ import (
kubeadmcontrolplanecontrollers "sigs.k8s.io/cluster-api/controlplane/kubeadm/controllers"
"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
kcpwebhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/webhooks"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
controlplanev1alpha3 "sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha3"
controlplanev1alpha4 "sigs.k8s.io/cluster-api/internal/apis/controlplane/kubeadm/v1alpha4"
@@ -92,6 +93,7 @@ var (
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
_ = expv1.AddToScheme(scheme)
_ = controlplanev1alpha3.AddToScheme(scheme)
_ = controlplanev1alpha4.AddToScheme(scheme)
_ = controlplanev1.AddToScheme(scheme)
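Registering the experimental API group in the scheme is what allows the manager's clients and caches to decode MachinePool objects. A minimal illustration of the same pattern (standalone sketch, not the manager's actual wiring):

```go
package example

import (
    "k8s.io/apimachinery/pkg/runtime"

    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// newScheme mirrors the init() change above: without expv1.AddToScheme the
// controller could not get, list, or watch MachinePool objects, even with the
// new RBAC rule in place.
func newScheme() *runtime.Scheme {
    s := runtime.NewScheme()
    _ = clusterv1.AddToScheme(s)
    _ = expv1.AddToScheme(s)
    return s
}
```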
31 changes: 15 additions & 16 deletions docs/book/src/developer/testing.md
@@ -27,8 +27,8 @@ if this is not possible, a viable solution is to use mocks (e.g CAPA).

### Generic providers
When writing tests core Cluster API contributors should ensure that the code works with any providers, and thus it is required
to not use any specific provider implementation. Instead, the so-called generic providers e.g. "GenericInfrastructureCluster"
should be used because they implement the plain Cluster API contract. This prevents tests from relying on assumptions that
to not use any specific provider implementation. Instead, the so-called generic providers e.g. "GenericInfrastructureCluster"
should be used because they implement the plain Cluster API contract. This prevents tests from relying on assumptions that
may not hold true in all cases.

Please note that in the long term we would like to improve the implementation of generic providers, centralizing
@@ -46,11 +46,11 @@ the test cluster.
With this approach it is possible to interact with Cluster API almost like in a real environment, by creating/updating
Kubernetes objects and waiting for the controllers to take action. See the [quick reference](#quick-reference) below for more details.

Also in case of integration tests, considerations about [mocking external APIs](#mocking-external-apis) and usage of [generic providers](#generic-providers) apply.
Also in case of integration tests, considerations about [mocking external APIs](#mocking-external-apis) and usage of [generic providers](#generic-providers) apply.

## Fuzzing tests

Fuzzing tests automatically inject randomly generated inputs, often invalid or with unexpected values, into functions to discover vulnerabilities.
Fuzzing tests automatically inject randomly generated inputs, often invalid or with unexpected values, into functions to discover vulnerabilities.

Two different types of fuzzing are currently being used on the Cluster API repository:

@@ -78,7 +78,7 @@ In light of continuing improving our practice around this ambitious goal, we are

Each contribution in growing this set of utilities or their adoption across the codebase is more than welcome!

Another consideration that can help in improving test maintainability is the idea of testing "by layers"; this idea could
Another consideration that can help in improving test maintainability is the idea of testing "by layers"; this idea could
apply whenever we are testing "higher-level" functions that internally uses one or more "lower-level" functions;
in order to avoid writing/maintaining redundant tests, whenever possible contributors should take care of testing
_only_ the logic that is implemented in the "higher-level" function, delegating the test function called internally
@@ -243,27 +243,26 @@ Execute the run configuration with `Debug`.

<h1>Tips</h1>

The e2e tests create a new management cluster with kind on each run. To avoid this and speed up the test execution the tests can
The e2e tests create a new management cluster with kind on each run. To avoid this and speed up the test execution the tests can
also be run against a management cluster created by [tilt](./tilt.md):
```bash
# Create a kind cluster
./hack/kind-install-for-capd.sh
# Set up the management cluster via tilt
tilt up
tilt up
```
Now you can start the e2e test via IDE as described above but with the additional `-e2e.use-existing-cluster=true` flag.

**Note**: This can also be used to debug controllers during e2e tests as described in [Developing Cluster API with Tilt](./tilt.md#wiring-up-debuggers).

The e2e tests also create a local clusterctl repository. After it has been created on a first test execution this step can also be
skipped by setting `-e2e.clusterctl-config=<ARTIFACTS>/repository/clusterctl-config.yaml`. This also works with a clusterctl repository created
The e2e tests also create a local clusterctl repository. After it has been created on a first test execution this step can also be
skipped by setting `-e2e.clusterctl-config=<ARTIFACTS>/repository/clusterctl-config.yaml`. This also works with a clusterctl repository created
via [Create the local repository](http://localhost:3000/clusterctl/developers.html#create-the-local-repository).

**Feature gates**: E2E tests often use features which need to be enabled first. Make sure to enable the feature gates in the tilt settings file:
```yaml
kustomize_substitutions:
CLUSTER_TOPOLOGY: "true"
EXP_MACHINE_POOL: "true"
EXP_CLUSTER_RESOURCE_SET: "true"
EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true"
EXP_RUNTIME_SDK: "true"
@@ -352,7 +351,7 @@ As alternative to loki, JSON logs can be visualized with a human readable timest

The `(. | tostring)` part could also be customized to only output parts of the JSON logline.
E.g.:

* `(.err)` to only output the error message part.
* `(.msg)` to only output the message part.
* `(.controller + " " + .msg)` to output the controller name and message part.
@@ -405,7 +404,7 @@ func TestMain(m *testing.M) {
```

Most notably, [envtest] provides not only a real API server to use during testing, but it offers the opportunity
to configure one or more controllers to run against the test cluster, as well as creating informers index.
to configure one or more controllers to run against the test cluster, as well as creating informers index.

```golang
func TestMain(m *testing.M) {
@@ -423,7 +422,7 @@ func TestMain(m *testing.M) {
if err := index.AddDefaultIndexes(ctx, mgr); err != nil {
panic(fmt.Sprintf("unable to setup index: %v", err))
}

// Run tests
...
}
@@ -440,8 +439,8 @@ should take care in ensuring each test runs in isolation from the others, by:
- Avoiding object name conflict.

Developers should also be aware of the fact that the informers cache used to access the [envtest]
depends on actual etcd watches/API calls for updates, and thus it could happen that after creating
or deleting objects the cache takes a few milliseconds to get updated. This can lead to test flakes,
depends on actual etcd watches/API calls for updates, and thus it could happen that after creating
or deleting objects the cache takes a few milliseconds to get updated. This can lead to test flakes,
and thus it always recommended to use patterns like create and wait or delete and wait; Cluster API env
test provides a set of utils for this scope.

@@ -530,7 +529,7 @@ comes with a set of limitations that could hamper the validity of a test, most n
of the test objects.
- the [fakeclient] does not use a cache based on informers/API calls/etcd watches, so the test written in this way
can't help in surfacing race conditions related to how those components behave in real cluster.
- there is no support for cache index/operations using cache indexes.
- there is no support for cache index/operations using cache indexes.

Accordingly, using [fakeclient] is not suitable for all the use cases, so in some cases contributors will be required
to use [envtest] instead. In case of doubts about which one to use when writing tests, don't hesitate to ask for
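Because MachinePool now defaults to true, tests no longer need to flip it explicitly; the pattern below remains useful for gates that still default to false. A hedged sketch mirroring the `SetFeatureGateDuringTest` usage already present in this repository:

```go
package example

import (
    "testing"

    utilfeature "k8s.io/component-base/featuregate/testing"

    "sigs.k8s.io/cluster-api/feature"
)

// TestWithClusterTopology enables a still-alpha gate only for the duration of
// this test; MachinePool no longer requires this after the default flip.
func TestWithClusterTopology(t *testing.T) {
    defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()

    if !feature.Gates.Enabled(feature.ClusterTopology) {
        t.Fatal("expected ClusterTopology to be enabled for this test")
    }
}
```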
5 changes: 2 additions & 3 deletions docs/book/src/developer/tilt.md
@@ -46,7 +46,7 @@ enable_providers:
- kubeadm-control-plane
```

To use tilt to launch a provider with its own repo, using Cluster API Provider AWS here, `tilt-settings.yaml` should look like:
To use tilt to launch a provider with its own repo, using Cluster API Provider AWS here, `tilt-settings.yaml` should look like:

```yaml
default_registry: gcr.io/your-project-name-here
@@ -108,7 +108,6 @@ provider's yaml. These substitutions are also used when deploying cluster templa
```yaml
kustomize_substitutions:
CLUSTER_TOPOLOGY: "true"
EXP_MACHINE_POOL: "true"
EXP_CLUSTER_RESOURCE_SET: "true"
EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true"
EXP_RUNTIME_SDK: "true"
@@ -409,7 +408,7 @@ build it.
**live_reload_deps**: a list of files/directories to watch. If any of them changes, Tilt rebuilds the manager binary
for the provider and performs a live update of the running container.
**version**: allows to define the version to be used for the Provider CR. If empty, a default version will
**version**: allows to define the version to be used for the Provider CR. If empty, a default version will
be used.
**additional_docker_helper_commands** (String, default=""): Additional commands to be run in the helper image
@@ -31,7 +31,6 @@ One way is to set experimental variables on the clusterctl config file. For CAPI
```yaml
variables:
EXP_CLUSTER_RESOURCE_SET: "true"
EXP_MACHINE_POOL: "true"
CLUSTER_TOPOLOGY: "true"
EXP_RUNTIME_SDK: "true"
EXP_MACHINE_SET_PREFLIGHT_CHECKS: "true"
@@ -46,7 +45,6 @@ On development environments started with `Tilt`, features can be enabled by sett
```yaml
kustomize_substitutions:
EXP_CLUSTER_RESOURCE_SET: 'true'
EXP_MACHINE_POOL: 'true'
CLUSTER_TOPOLOGY: 'true'
EXP_RUNTIME_SDK: 'true'
EXP_MACHINE_SET_PREFLIGHT_CHECKS: 'true'
2 changes: 1 addition & 1 deletion docs/book/src/tasks/experimental-features/machine-pools.md
@@ -1,4 +1,4 @@
# Experimental Feature: MachinePool (alpha)
# Experimental Feature: MachinePool (beta)

The `MachinePool` feature provides a way to manage a set of machines by defining a common configuration, number of desired machine replicas etc. similar to `MachineDeployment`,
except `MachineSet` controllers are responsible for the lifecycle management of the machines for `MachineDeployment`, whereas in `MachinePools`,
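For readers new to the feature, a hedged example of a minimal MachinePool built with the Go types from `exp/api/v1beta1`; every name, version, and referenced kind here is illustrative, not taken from this commit:

```go
package example

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/utils/ptr"

    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// exampleMachinePool builds a minimal MachinePool; with the gate now on by
// default, such objects can be created without any extra feature-gate setup.
func exampleMachinePool() *expv1.MachinePool {
    return &expv1.MachinePool{
        ObjectMeta: metav1.ObjectMeta{Name: "worker-pool", Namespace: "default"},
        Spec: expv1.MachinePoolSpec{
            ClusterName: "my-cluster",
            Replicas:    ptr.To[int32](3),
            Template: clusterv1.MachineTemplateSpec{
                Spec: clusterv1.MachineSpec{
                    ClusterName: "my-cluster",
                    Version:     ptr.To("v1.29.0"),
                    Bootstrap: clusterv1.Bootstrap{
                        ConfigRef: &corev1.ObjectReference{
                            APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
                            Kind:       "KubeadmConfig",
                            Name:       "worker-pool-bootstrap",
                        },
                    },
                    InfrastructureRef: corev1.ObjectReference{
                        APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
                        Kind:       "DockerMachinePool",
                        Name:       "worker-pool-infra",
                    },
                },
            },
        },
    }
}
```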
3 changes: 0 additions & 3 deletions docs/book/src/user/quick-start.md
@@ -539,9 +539,6 @@ for the upgrade from v1.23 to v1.24 as we have to use different cgroupDrivers de
# Enable the experimental Cluster topology feature.
export CLUSTER_TOPOLOGY=true
# Enable the experimental Machine Pool feature
export EXP_MACHINE_POOL=true
# Initialize the management cluster
clusterctl init --infrastructure docker
```
@@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -40,7 +39,6 @@ import (
"sigs.k8s.io/cluster-api/controllers/external"
"sigs.k8s.io/cluster-api/controllers/remote"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/test/builder"
"sigs.k8s.io/cluster-api/internal/util/ssa"
"sigs.k8s.io/cluster-api/util/kubeconfig"
@@ -1236,10 +1234,6 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) {

func TestReconcileMachinePoolMachines(t *testing.T) {
t.Run("Reconcile MachinePool Machines", func(t *testing.T) {
// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
// Enabling the feature flag temporarily for this test.
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()

g := NewWithT(t)

ns, err := env.CreateNamespace(ctx, "test-machinepool-machines")
18 changes: 0 additions & 18 deletions exp/internal/webhooks/machinepool_test.go
@@ -23,23 +23,17 @@ import (
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/webhooks/util"
)

var ctx = ctrl.SetupSignalHandler()

func TestMachinePoolDefault(t *testing.T) {
// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
// Enabling the feature flag temporarily for this test.
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()

g := NewWithT(t)

mp := &expv1.MachinePool{
@@ -68,9 +62,6 @@ func TestMachinePoolDefault(t *testing.T) {
}

func TestMachinePoolBootstrapValidation(t *testing.T) {
// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
// Enabling the feature flag temporarily for this test.
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
tests := []struct {
name string
bootstrap clusterv1.Bootstrap
@@ -127,9 +118,6 @@ func TestMachinePoolBootstrapValidation(t *testing.T) {
}

func TestMachinePoolNamespaceValidation(t *testing.T) {
// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
// Enabling the feature flag temporarily for this test.
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
tests := []struct {
name string
expectErr bool
@@ -204,9 +192,6 @@ func TestMachinePoolNamespaceValidation(t *testing.T) {
}

func TestMachinePoolClusterNameImmutable(t *testing.T) {
// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
// Enabling the feature flag temporarily for this test.
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
tests := []struct {
name string
oldClusterName string
@@ -266,9 +251,6 @@ func TestMachinePoolClusterNameImmutable(t *testing.T) {
}

func TestMachinePoolVersionValidation(t *testing.T) {
// NOTE: MachinePool feature flag is disabled by default, thus preventing to create or update MachinePool.
// Enabling the feature flag temporarily for this test.
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
tests := []struct {
name string
expectErr bool
2 changes: 1 addition & 1 deletion feature/feature.go
@@ -70,7 +70,7 @@ func init() {
// To add a new feature, define a key for it above and add it here.
var defaultClusterAPIFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
// Every feature should be initiated here:
MachinePool: {Default: false, PreRelease: featuregate.Alpha},
MachinePool: {Default: true, PreRelease: featuregate.Beta},
ClusterResourceSet: {Default: true, PreRelease: featuregate.Beta},
ClusterTopology: {Default: false, PreRelease: featuregate.Alpha},
KubeadmBootstrapFormatIgnition: {Default: false, PreRelease: featuregate.Alpha},
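Moving the gate to Beta with `Default: true` means consumers of the `feature` package now see MachinePool enabled unless it is turned off explicitly. A small sketch of how code typically checks it, assuming the package's exported `Gates` as used elsewhere in this commit:

```go
package main

import (
    "fmt"

    "sigs.k8s.io/cluster-api/feature"
)

func main() {
    // After this commit the check passes by default; deployments can still opt
    // out with --feature-gates=MachinePool=false (i.e. EXP_MACHINE_POOL=false).
    if feature.Gates.Enabled(feature.MachinePool) {
        fmt.Println("MachinePool support is active")
    }
}
```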
@@ -64,7 +64,6 @@ var (

func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 5 * time.Second

@@ -113,7 +112,6 @@ func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) {

func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()

g := NewWithT(t)
timeout := 5 * time.Second
@@ -166,7 +164,6 @@ func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) {

func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 300 * time.Second

@@ -258,7 +255,6 @@ func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) {

func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 5 * time.Second

@@ -359,7 +355,6 @@ func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) {

func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 30 * time.Second

@@ -441,7 +436,6 @@ func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) {

func TestClusterReconciler_reconcileDelete(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()

catalog := runtimecatalog.New()
_ = runtimehooksv1.AddToCatalog(catalog)
@@ -595,7 +589,6 @@ func TestClusterReconciler_reconcileDelete(t *testing.T) {
// In this case deletion of the ClusterClass should be blocked by the webhook.
func TestClusterReconciler_deleteClusterClass(t *testing.T) {
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
g := NewWithT(t)
timeout := 5 * time.Second
