diff --git a/internal/controllers/topology/cluster/blueprint_test.go b/internal/controllers/topology/cluster/blueprint_test.go index 621328b62a49..1b6461f7eb99 100644 --- a/internal/controllers/topology/cluster/blueprint_test.go +++ b/internal/controllers/topology/cluster/blueprint_test.go @@ -56,6 +56,8 @@ func TestGetBlueprint(t *testing.T) { workerInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "workerinframachinetemplate1"). Build() + workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "workerinframachinepooltemplate1"). + Build() workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "workerbootstraptemplate1"). Build() machineHealthCheck := &clusterv1.MachineHealthCheckClass{ @@ -73,6 +75,14 @@ func TestGetBlueprint(t *testing.T) { mds := []clusterv1.MachineDeploymentClass{*machineDeployment} + machinePools := builder.MachinePoolClass("workerclass2"). + WithLabels(map[string]string{"foo": "bar"}). + WithAnnotations(map[string]string{"a": "b"}). + WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate). + WithBootstrapTemplate(workerBootstrapTemplate). + Build() + mps := []clusterv1.MachinePoolClass{*machinePools} + // Define test cases. tests := []struct { name string @@ -141,6 +151,7 @@ func TestGetBlueprint(t *testing.T) { Template: controlPlaneTemplate, }, MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, { @@ -167,6 +178,7 @@ func TestGetBlueprint(t *testing.T) { InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, }, MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, { @@ -217,6 +229,7 @@ func TestGetBlueprint(t *testing.T) { MachineHealthCheck: machineHealthCheck, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, { @@ -276,8 +289,60 @@ func TestGetBlueprint(t *testing.T) { MachineHealthCheck: machineHealthCheck, }, MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, + { + name: "Should read a ClusterClass with a MachinePoolClass", + clusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). + Build(), + objects: []client.Object{ + infraClusterTemplate, + controlPlaneTemplate, + workerInfrastructureMachinePoolTemplate, + workerBootstrapTemplate, + }, + want: &scope.ClusterBlueprint{ + ClusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). 
+ Build(), + InfrastructureClusterTemplate: infraClusterTemplate, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplate, + }, + MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "workerclass2": { + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + Annotations: map[string]string{"a": "b"}, + }, + InfrastructureMachinePoolTemplate: workerInfrastructureMachinePoolTemplate, + BootstrapTemplate: workerBootstrapTemplate, + }, + }, + }, + }, + { + name: "Fails if ClusterClass has a MachinePoolClass referencing a BootstrapConfig that does not exist", + clusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). + Build(), + objects: []client.Object{ + infraClusterTemplate, + controlPlaneTemplate, + workerInfrastructureMachinePoolTemplate, + // workerBootstrapTemplate is missing! + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -334,6 +399,7 @@ func TestGetBlueprint(t *testing.T) { g.Expect(tt.want.InfrastructureClusterTemplate).To(EqualObject(got.InfrastructureClusterTemplate), cmp.Diff(got.InfrastructureClusterTemplate, tt.want.InfrastructureClusterTemplate)) g.Expect(got.ControlPlane).To(BeComparableTo(tt.want.ControlPlane), cmp.Diff(got.ControlPlane, tt.want.ControlPlane)) g.Expect(tt.want.MachineDeployments).To(BeComparableTo(got.MachineDeployments), got.MachineDeployments, tt.want.MachineDeployments) + g.Expect(tt.want.MachinePools).To(BeComparableTo(got.MachinePools), got.MachinePools, tt.want.MachinePools) }) } } diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go index cea202a5e82e..4c1fff6db494 100644 --- a/internal/controllers/topology/cluster/current_state_test.go +++ b/internal/controllers/topology/cluster/current_state_test.go @@ -157,6 +157,35 @@ func TestGetCurrentState(t *testing.T) { WithClusterName("cluster1"). Build() + // MachinePool and related objects. + emptyMachinePools := make(map[string]*scope.MachinePoolState) + + machinePoolInfrastructure := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "infra2"). + Build() + machinePoolInfrastructure.SetLabels(map[string]string{clusterv1.ClusterTopologyOwnedLabel: ""}) + machinePoolBootstrap := builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2"). + Build() + machinePoolBootstrap.SetLabels(map[string]string{clusterv1.ClusterTopologyOwnedLabel: ""}) + + machinePool := builder.MachinePool(metav1.NamespaceDefault, "mp1"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "mp1", + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build() + machinePool2 := builder.MachinePool(metav1.NamespaceDefault, "mp2"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "mp2", + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure).
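+ // machinePool2 ("mp2") has no matching MachinePoolTopology entry in most test clusters; the "topology has been deleted" case below uses it to verify that such MachinePools are still read into the current state.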
+ Build() + tests := []struct { name string cluster *clusterv1.Cluster @@ -177,6 +206,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: nil, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -233,7 +263,7 @@ func TestGetCurrentState(t *testing.T) { objects: []client.Object{ infraCluster, }, - // Expecting valid return with no ControlPlane or MachineDeployment state defined but with a valid Infrastructure state. + // Expecting valid return with no ControlPlane, MachineDeployment, or MachinePool state defined but with a valid Infrastructure state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). @@ -241,6 +271,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: infraCluster, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -262,7 +293,7 @@ func TestGetCurrentState(t *testing.T) { clusterClassWithNoControlPlaneInfra, // Workers are missing! }, - // Expecting valid return with ControlPlane, no ControlPlane Infrastructure state, InfrastructureCluster state and no defined MachineDeployment state. + // Expecting valid return with ControlPlane, no ControlPlane Infrastructure state, InfrastructureCluster state, and no defined MachineDeployment or MachinePool state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithControlPlane(controlPlane). @@ -271,6 +302,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{Object: controlPlane, InfrastructureMachineTemplate: nil}, InfrastructureCluster: infraCluster, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -312,7 +344,7 @@ func TestGetCurrentState(t *testing.T) { controlPlaneInfrastructureMachineTemplate, // Workers are missing! }, - // Expecting valid return with valid ControlPlane state, but no ControlPlane Infrastructure, InfrastructureCluster or MachineDeployment state defined. + // Expecting valid return with valid ControlPlane state, but no ControlPlane Infrastructure, InfrastructureCluster, MachineDeployment, or MachinePool state defined. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithControlPlane(controlPlaneWithInfra). @@ -320,6 +352,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, InfrastructureCluster: nil, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -343,7 +376,7 @@ func TestGetCurrentState(t *testing.T) { controlPlaneWithInfra, // Workers are missing! }, - // Expecting valid return with valid ControlPlane state, ControlPlane Infrastructure state and InfrastructureCluster state, but no defined MachineDeployment state. + // Expecting valid return with valid ControlPlane state, ControlPlane Infrastructure state and InfrastructureCluster state, but no defined MachineDeployment or MachinePool state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). 
@@ -352,6 +385,7 @@ ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, InfrastructureCluster: infraCluster, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -362,6 +396,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), blueprint: &scope.ClusterBlueprint{ @@ -377,6 +415,12 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrap, + InfrastructureMachinePoolTemplate: machinePoolInfrastructure, + }, + }, }, objects: []client.Object{ infraCluster, @@ -386,6 +430,9 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentInfrastructure, machineDeploymentBootstrap, machineDeployment, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, }, // Expecting valid return with MachineDeployment and MachinePool state, but no ControlPlane or InfrastructureCluster state defined. want: &scope.ClusterState{ @@ -395,12 +442,18 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: nil, MachineDeployments: map[string]*scope.MachineDeploymentState{ "md1": {Object: machineDeployment, BootstrapTemplate: machineDeploymentBootstrap, InfrastructureMachineTemplate: machineDeploymentInfrastructure}}, + MachinePools: map[string]*scope.MachinePoolState{ + "mp1": {Object: machinePool, BootstrapObject: machinePoolBootstrap, InfrastructureMachinePoolObject: machinePoolInfrastructure}}, }, }, { @@ -443,7 +496,7 @@ func TestGetCurrentState(t *testing.T) { wantErr: true, }, { - name: "Should ignore unmanaged MachineDeployments and MachineDeployments belonging to other clusters", + name: "Should ignore unmanaged MachineDeployments/MachinePools and MachineDeployments/MachinePools belonging to other clusters", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). Build(), blueprint: &scope.ClusterBlueprint{ClusterClass: clusterClassWithControlPlaneInfra}, objects: []client.Object{ @@ -466,14 +519,32 @@ func TestGetCurrentState(t *testing.T) { WithBootstrapTemplate(machineDeploymentBootstrap). WithInfrastructureTemplate(machineDeploymentInfrastructure). Build(), + builder.MachinePool(metav1.NamespaceDefault, "no-managed-label"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + // topology.cluster.x-k8s.io/owned label is missing (unmanaged)! + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), + builder.MachinePool(metav1.NamespaceDefault, "wrong-cluster-label"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "another-cluster", + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "mp1", + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), }, - // Expect valid return with empty MachineDeployments properly filtered by label. + // Expect valid return with empty MachineDeployments and MachinePools properly filtered by label.
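+ // The current-state reader only picks up MachinePools labeled with both the cluster-name label for "cluster1" and the topology-owned label; "no-managed-label" lacks the owned label and "wrong-cluster-label" belongs to another cluster, so both must be ignored.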
want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). Build(), ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: nil, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -528,7 +599,58 @@ func TestGetCurrentState(t *testing.T) { wantErr: true, }, { - name: "Should read a full Cluster (With InfrastructureCluster, ControlPlane and ControlPlane Infrastructure, MachineDeployments)", + name: "Fails if there are MachinePools without the topology.cluster.x-k8s.io/pool-name label", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + Build(), + blueprint: &scope.ClusterBlueprint{ClusterClass: clusterClassWithControlPlaneInfra}, + objects: []client.Object{ + clusterClassWithControlPlaneInfra, + builder.MachinePool(metav1.NamespaceDefault, "missing-topology-mp-labelName"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + clusterv1.ClusterTopologyOwnedLabel: "", + // topology.cluster.x-k8s.io/pool-name label is missing! + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), + }, + // Expect error to be thrown as no managed MachinePool is reconcilable unless it has a ClusterTopologyMachinePoolNameLabel. + wantErr: true, + }, + { + name: "Fails if there are MachinePools with the same topology.cluster.x-k8s.io/pool-name", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + WithTopology(builder.ClusterTopology(). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). + Build()). + Build(), + blueprint: &scope.ClusterBlueprint{ + ClusterClass: clusterClassWithControlPlaneInfra, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplateWithInfrastructureMachine, + InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, + }, + }, + objects: []client.Object{ + clusterClassWithControlPlaneInfra, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, + builder.MachinePool(metav1.NamespaceDefault, "duplicate-labels"). + WithLabels(machinePool.Labels). // Another machine pool with the same labels. + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), + }, + // Expect error as two MachinePools with the same ClusterTopologyMachinePoolNameLabel should not exist for one cluster. + wantErr: true, + }, + { + name: "Should read a full Cluster (With InfrastructureCluster, ControlPlane and ControlPlane Infrastructure, MachineDeployments, MachinePools)", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). WithControlPlane(controlPlaneWithInfra). @@ -537,6 +659,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()).
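+ // Happy path: every topology entry ("md1"/"mp1") has its backing objects registered with the fake client, so the full ClusterState should round-trip.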
Build(), blueprint: &scope.ClusterBlueprint{ @@ -552,6 +678,12 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrap, + InfrastructureMachinePoolTemplate: machinePoolInfrastructure, + }, + }, }, objects: []client.Object{ infraCluster, @@ -561,8 +693,11 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentInfrastructure, machineDeploymentBootstrap, machineDeployment, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, }, - // Expect valid return of full ClusterState with MachineDeployments properly filtered by label. + // Expect valid return of full ClusterState with MachineDeployments and MachinePools properly filtered by label. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). @@ -572,6 +707,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, @@ -583,10 +722,17 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolState{ + "mp1": { + Object: machinePool, + BootstrapObject: machinePoolBootstrap, + InfrastructureMachinePoolObject: machinePoolInfrastructure, + }, + }, }, }, { - name: "Should read a full Cluster, even if a MachineDeployment topology has been deleted and the MachineDeployment still exists", + name: "Should read a full Cluster, even if a MachineDeployment & MachinePool topology has been deleted and the MachineDeployment & MachinePool still exists", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). WithControlPlane(controlPlaneWithInfra). @@ -595,6 +741,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), blueprint: &scope.ClusterBlueprint{ @@ -610,6 +760,12 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrap, + InfrastructureMachinePoolTemplate: machinePoolInfrastructure, + }, + }, }, objects: []client.Object{ infraCluster, @@ -620,6 +776,10 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentBootstrap, machineDeployment, machineDeployment2, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, + machinePool2, }, // Expect valid return of full ClusterState with MachineDeployments and MachinePools properly filtered by label. want: &scope.ClusterState{ @@ -631,6 +791,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()).
Build(), ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, @@ -647,6 +811,18 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolState{ + "mp1": { + Object: machinePool, + BootstrapObject: machinePoolBootstrap, + InfrastructureMachinePoolObject: machinePoolInfrastructure, + }, + "mp2": { + Object: machinePool2, + BootstrapObject: machinePoolBootstrap, + InfrastructureMachinePoolObject: machinePoolInfrastructure, + }, + }, }, }, { @@ -688,6 +864,45 @@ func TestGetCurrentState(t *testing.T) { // Expect error as Bootstrap Template not defined for MachineDeployments relevant to the Cluster. wantErr: true, }, + { + name: "Fails if a Cluster has a MachinePool without the Bootstrap Template ref", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + WithTopology(builder.ClusterTopology(). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). + Build()). + Build(), + blueprint: &scope.ClusterBlueprint{ + ClusterClass: clusterClassWithControlPlaneInfra, + InfrastructureClusterTemplate: infraClusterTemplate, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplateWithInfrastructureMachine, + InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, + }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrap, + InfrastructureMachinePoolTemplate: machinePoolInfrastructure, + }, + }, + }, + objects: []client.Object{ + infraCluster, + clusterClassWithControlPlaneInfra, + controlPlaneInfrastructureMachineTemplate, + controlPlaneWithInfra, + machinePoolInfrastructure, + builder.MachinePool(metav1.NamespaceDefault, "no-bootstrap"). + WithLabels(machinePool.Labels). + // No BootstrapTemplate reference! + WithInfrastructure(machinePoolInfrastructure). + Build(), + }, + // Expect error as Bootstrap Template not defined for MachinePools relevant to the Cluster. + wantErr: true, + }, { name: "Fails if a Cluster has a MachineDeployment without the InfrastructureMachineTemplate ref", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). @@ -727,6 +942,45 @@ func TestGetCurrentState(t *testing.T) { // Expect error as Infrastructure Template not defined for MachineDeployment relevant to the Cluster. wantErr: true, }, + { + name: "Fails if a Cluster has a MachinePool without the InfrastructureMachineTemplate ref", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + WithTopology(builder.ClusterTopology(). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). + Build()).
+ Build(), + blueprint: &scope.ClusterBlueprint{ + ClusterClass: clusterClassWithControlPlaneInfra, + InfrastructureClusterTemplate: infraClusterTemplate, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplateWithInfrastructureMachine, + InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, + }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrap, + InfrastructureMachinePoolTemplate: machinePoolInfrastructure, + }, + }, + }, + objects: []client.Object{ + infraCluster, + clusterClassWithControlPlaneInfra, + controlPlaneInfrastructureMachineTemplate, + controlPlaneWithInfra, + machinePoolBootstrap, + builder.MachinePool(metav1.NamespaceDefault, "no-infra"). + WithLabels(machinePool.Labels). + WithBootstrap(machinePoolBootstrap). + // No InfrastructureMachineTemplate reference! + Build(), + }, + // Expect error as Infrastructure Template not defined for MachinePool relevant to the Cluster. + wantErr: true, + }, { name: "Pass reading a full Cluster with MachineHealthChecks for ControlPlane and MachineDeployment", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). @@ -790,6 +1044,7 @@ func TestGetCurrentState(t *testing.T) { MachineHealthCheck: machineHealthCheckForMachineDeployment, }, }, + MachinePools: emptyMachinePools, }, }, } @@ -839,6 +1094,7 @@ func TestGetCurrentState(t *testing.T) { g.Expect(got.InfrastructureCluster).To(EqualObject(tt.want.InfrastructureCluster)) g.Expect(got.ControlPlane).To(BeComparableTo(tt.want.ControlPlane), cmp.Diff(got.ControlPlane, tt.want.ControlPlane)) g.Expect(got.MachineDeployments).To(BeComparableTo(tt.want.MachineDeployments)) + g.Expect(got.MachinePools).To(BeComparableTo(tt.want.MachinePools)) }) } } diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go index 0aed86b5c517..6b3bc280cc54 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/internal/controllers/topology/cluster/desired_state.go @@ -953,6 +953,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c cluster: s.Current.Cluster, namePrefix: bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name), currentObjectRef: currentBootstrapConfigRef, + ownerRef: ownerReferenceTo(s.Current.Cluster), }) if err != nil { return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name) @@ -977,6 +978,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c cluster: s.Current.Cluster, namePrefix: infrastructureMachinePoolNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name), currentObjectRef: currentInfraMachinePoolRef, + ownerRef: ownerReferenceTo(s.Current.Cluster), }) if err != nil { return nil, errors.Wrapf(err, "failed to compute infrastructure object for topology %q", machinePoolTopology.Name) diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go index b2edb1d7b645..06864395e2ea 100644 --- a/internal/controllers/topology/cluster/desired_state_test.go +++ b/internal/controllers/topology/cluster/desired_state_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog
"sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" @@ -44,6 +45,8 @@ import ( "sigs.k8s.io/cluster-api/util" ) +const version = "1.21.2" + var ( fakeRef1 = &corev1.ObjectReference{ Kind: "refKind1", @@ -274,7 +277,6 @@ func TestComputeControlPlane(t *testing.T) { Build() // TODO: Replace with object builder. // current cluster objects - version := "v1.21.2" replicas := int32(3) topologyDuration := 10 * time.Second nodeDrainTimeout := metav1.Duration{Duration: topologyDuration} @@ -676,6 +678,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { topologyVersion string controlPlaneObj *unstructured.Unstructured upgradingMachineDeployments []string + upgradingMachinePools []string expectedVersion string wantErr bool }{ @@ -758,6 +761,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { }). Build(), upgradingMachineDeployments: []string{"md1"}, + upgradingMachinePools: []string{"mp1"}, expectedVersion: "v1.2.2", }, { @@ -778,6 +782,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { }). Build(), upgradingMachineDeployments: []string{}, + upgradingMachinePools: []string{}, expectedVersion: "v1.2.3", }, { @@ -846,6 +851,9 @@ func TestComputeControlPlaneVersion(t *testing.T) { if len(tt.upgradingMachineDeployments) > 0 { s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...) } + if len(tt.upgradingMachinePools) > 0 { + s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) + } runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). WithCatalog(catalog). @@ -1341,7 +1349,6 @@ func TestComputeMachineDeployment(t *testing.T) { WithWorkerMachineDeploymentClasses(mcds...). Build() - version := "v1.21.2" cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster1", @@ -1507,7 +1514,7 @@ func TestComputeMachineDeployment(t *testing.T) { Replicas: ¤tReplicas, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.21.2"), + Version: pointer.String(version), Bootstrap: clusterv1.Bootstrap{ ConfigRef: contract.ObjToRef(workerBootstrapTemplate), }, @@ -1717,6 +1724,358 @@ func TestComputeMachineDeployment(t *testing.T) { }) } +func TestComputeMachinePool(t *testing.T) { + workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepooltemplate"). + Build() + workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate"). + Build() + labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"} + annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"} + + clusterClassDuration := metav1.Duration{Duration: 20 * time.Second} + var clusterClassMinReadySeconds int32 = 20 + mp1 := builder.MachinePoolClass("linux-worker"). + WithLabels(labels). + WithAnnotations(annotations). + WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate). + WithBootstrapTemplate(workerBootstrapTemplate). + WithFailureDomains("A", "B"). + WithNodeDrainTimeout(&clusterClassDuration). + WithNodeVolumeDetachTimeout(&clusterClassDuration). + WithNodeDeletionTimeout(&clusterClassDuration). + WithMinReadySeconds(&clusterClassMinReadySeconds). + Build() + mcps := []clusterv1.MachinePoolClass{*mp1} + fakeClass := builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithWorkerMachinePoolClasses(mcps...). 
+ Build() + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.ClusterSpec{ + Topology: &clusterv1.Topology{ + Version: version, + }, + }, + } + + blueprint := &scope.ClusterBlueprint{ + Topology: cluster.Spec.Topology, + ClusterClass: fakeClass, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "linux-worker": { + Metadata: clusterv1.ObjectMeta{ + Labels: labels, + Annotations: annotations, + }, + BootstrapTemplate: workerBootstrapTemplate, + InfrastructureMachinePoolTemplate: workerInfrastructureMachinePoolTemplate, + }, + }, + } + + replicas := int32(5) + topologyFailureDomains := []string{"A", "B"} + topologyDuration := metav1.Duration{Duration: 10 * time.Second} + var topologyMinReadySeconds int32 = 10 + mpTopology := clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{ + // Should overwrite the label from the MachinePool class. + "fooLabel": "baz", + }, + Annotations: map[string]string{ + // Should overwrite the annotation from the MachinePool class. + "fooAnnotation": "baz", + // These annotations should not be propagated to the MachinePool. + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", + }, + }, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + FailureDomains: topologyFailureDomains, + NodeDrainTimeout: &topologyDuration, + NodeVolumeDetachTimeout: &topologyDuration, + NodeDeletionTimeout: &topologyDuration, + MinReadySeconds: &topologyMinReadySeconds, + } + + t.Run("Generates the machine pool and the referenced templates", func(t *testing.T) { + g := NewWithT(t) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + actual, err := computeMachinePool(ctx, scope, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(actual.BootstrapObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines")) + + // Ensure Cluster ownership is added to generated BootstrapObject. + g.Expect(actual.BootstrapObject.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster")) + g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name)) + + g.Expect(actual.InfrastructureMachinePoolObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines")) + + // Ensure Cluster ownership is added to generated InfrastructureMachinePoolObject.
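+ // The single Cluster ownerRef asserted on both generated objects comes from the ownerRef field added to computeMachinePool in desired_state.go above.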
+ g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster")) + g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name)) + + actualMp := actual.Object + g.Expect(*actualMp.Spec.Replicas).To(Equal(replicas)) + g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration)) + g.Expect(actualMp.Spec.ClusterName).To(Equal("cluster1")) + g.Expect(actualMp.Name).To(ContainSubstring("cluster1")) + g.Expect(actualMp.Name).To(ContainSubstring("big-pool-of-machines")) + g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains)) + + expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations) + delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation) + delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation) + g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations)) + g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations)) + + g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + + g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinepooltemplate")) + g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate")) + }) + t.Run("Generates the machine pool and the referenced templates using ClusterClass defaults", func(t *testing.T) { + g := NewWithT(t) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + mpTopology := clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "baz"}, + }, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + // missing FailureDomains, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds + } + + actual, err := computeMachinePool(ctx, scope, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + + // checking only values from CC defaults + actualMp := actual.Object + g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration)) + }) + + t.Run("If there is already a machine pool, it preserves the object name and the reference names", func(t *testing.T) { + g := NewWithT(t) + s := scope.New(cluster) + s.Blueprint = blueprint + +
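+ // Seed Current.MachinePools with an existing MachinePool so computeMachinePool reuses the object name "existing-pool-1" and the current template names instead of generating new ones.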
currentReplicas := int32(3) + currentMp := &expv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-pool-1", + }, + Spec: expv1.MachinePoolSpec{ + Replicas: &currentReplicas, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: pointer.String(version), + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: contract.ObjToRef(workerBootstrapTemplate), + }, + InfrastructureRef: *contract.ObjToRef(workerInfrastructureMachinePoolTemplate), + }, + }, + }, + } + s.Current.MachinePools = map[string]*scope.MachinePoolState{ + "big-pool-of-machines": { + Object: currentMp, + BootstrapObject: workerBootstrapTemplate, + InfrastructureMachinePoolObject: workerInfrastructureMachinePoolTemplate, + }, + } + + actual, err := computeMachinePool(ctx, s, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + + actualMp := actual.Object + + g.Expect(*actualMp.Spec.Replicas).NotTo(Equal(currentReplicas)) + g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains)) + g.Expect(actualMp.Name).To(Equal("existing-pool-1")) + + expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations) + delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation) + delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation) + g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations)) + g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations)) + + g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + + g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinepooltemplate")) + g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstraptemplate")) + }) + + t.Run("If a machine pool references a topology class that does not exist, machine pool generation fails", func(t *testing.T) { + g := NewWithT(t) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + mpTopology = clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "baz"}, + }, + Class: "windows-worker", + Name: "big-pool-of-machines", + } + + _, err := computeMachinePool(ctx, scope, mpTopology) + g.Expect(err).To(HaveOccurred()) + }) + + t.Run("Should choose the correct version for machine pool", func(t *testing.T) { + controlPlaneStable123 := builder.ControlPlane("test1", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": "v1.2.3", + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": "v1.2.3", + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + + // Note: in all the following tests we are setting it up so that the control plane is already + // stable at the topology version. + // A more extensive list of scenarios is tested in TestComputeMachinePoolVersion.
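+ // Each case below drives computeMachinePool with a different mix of upgrading MachinePools and upgrade concurrency (set via the upgrade-concurrency annotation on the test Cluster) and checks which version lands in spec.template.spec.version.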
+ tests := []struct { + name string + upgradingMachinePools []string + currentMPVersion *string + upgradeConcurrency string + topologyVersion string + expectedVersion string + }{ + { + name: "use cluster.spec.topology.version if creating a new machine pool", + upgradingMachinePools: []string{}, + upgradeConcurrency: "1", + currentMPVersion: nil, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + }, + { + name: "use cluster.spec.topology.version if creating a new machine pool while another machine pool is upgrading", + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: "1", + currentMPVersion: nil, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + }, + { + name: "use machine pool's spec.template.spec.version if one of the machine pools is upgrading, concurrency limit reached", + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: "1", + currentMPVersion: pointer.String("v1.2.2"), + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + }, + { + name: "use cluster.spec.topology.version if one of the machine pools is upgrading, concurrency limit not reached", + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: "2", + currentMPVersion: pointer.String("v1.2.2"), + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + testCluster := cluster.DeepCopy() + if testCluster.Annotations == nil { + testCluster.Annotations = map[string]string{} + } + testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = tt.upgradeConcurrency + + s := scope.New(testCluster) + s.Blueprint = blueprint + s.Blueprint.Topology.Version = tt.topologyVersion + s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + } + s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{} + + mpsState := scope.MachinePoolsStateMap{} + if tt.currentMPVersion != nil { + // testing a case with an existing machine pool + // add the stable machine pool to the current machine pools state + mp := builder.MachinePool("test-namespace", "big-pool-of-machines"). + WithReplicas(2). + WithVersion(*tt.currentMPVersion). + WithStatus(expv1.MachinePoolStatus{ + ObservedGeneration: 2, + Replicas: 2, + ReadyReplicas: 2, + AvailableReplicas: 2, + }). + Build() + mpsState = duplicateMachinePoolsState(mpsState) + mpsState["big-pool-of-machines"] = &scope.MachinePoolState{ + Object: mp, + } + } + s.Current.MachinePools = mpsState + s.Current.ControlPlane = &scope.ControlPlaneState{ + Object: controlPlaneStable123, + } + + mpTopology := clusterv1.MachinePoolTopology{ + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: pointer.Int32(2), + } + s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) + obj, err := computeMachinePool(ctx, s, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) + }) + } + }) +} + func TestComputeMachineDeploymentVersion(t *testing.T) { controlPlaneObj := builder.ControlPlane("test1", "cp1"). Build() @@ -1895,6 +2254,184 @@ func TestComputeMachineDeploymentVersion(t *testing.T) { } } +func TestComputeMachinePoolVersion(t *testing.T) { + controlPlaneObj := builder.ControlPlane("test1", "cp1"). 
+ Build() + + mpName := "mp-1" + currentMachinePoolState := &scope.MachinePoolState{Object: builder.MachinePool("test1", mpName).WithVersion("v1.2.2").Build()} + + tests := []struct { + name string + machinePoolTopology clusterv1.MachinePoolTopology + currentMachinePoolState *scope.MachinePoolState + upgradingMachinePools []string + upgradeConcurrency int + controlPlaneStartingUpgrade bool + controlPlaneUpgrading bool + controlPlaneScaling bool + controlPlaneProvisioning bool + afterControlPlaneUpgradeHookBlocking bool + topologyVersion string + expectedVersion string + expectPendingCreate bool + expectPendingUpgrade bool + }{ + { + name: "should return cluster.spec.topology.version if creating a new machine pool and if control plane is stable - not marked as pending create", + currentMachinePoolState: nil, + machinePoolTopology: clusterv1.MachinePoolTopology{ + Name: "mp-topology-1", + }, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingCreate: false, + }, + { + name: "should return cluster.spec.topology.version if creating a new machine pool and if control plane is not stable - marked as pending create", + controlPlaneScaling: true, + machinePoolTopology: clusterv1.MachinePoolTopology{ + Name: "mp-topology-1", + }, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingCreate: true, + }, + { + name: "should return machine pool's spec.template.spec.version if upgrade is deferred", + machinePoolTopology: clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + }, + }, + }, + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + // Control plane is considered upgrading if the control plane's spec.version and status.version are not equal. + name: "should return machine pool's spec.template.spec.version if control plane is upgrading", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + controlPlaneUpgrading: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + // Control plane is considered ready to upgrade if spec.version of current and desired control planes are not equal. + name: "should return machine pool's spec.template.spec.version if control plane is starting upgrade", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + controlPlaneStartingUpgrade: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + // Control plane is considered scaling if its spec.replicas is not equal to any of status.replicas, status.readyReplicas or status.updatedReplicas.
+ name: "should return machine Pool's spec.template.spec.version if control plane is scaling", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + controlPlaneScaling: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + name: "should return cluster.spec.topology.version if the control plane is not upgrading, not scaling, not ready to upgrade and none of the machine Pools are upgrading", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingUpgrade: false, + }, + { + name: "should return machine Pool's spec.template.spec.version if control plane is stable, other machine Pools are upgrading, concurrency limit not reached but AfterControlPlaneUpgrade hook is blocking", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: 2, + afterControlPlaneUpgradeHookBlocking: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + name: "should return cluster.spec.topology.version if control plane is stable, other machine Pools are upgrading, concurrency limit not reached", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: 2, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingUpgrade: false, + }, + { + name: "should return machine Pool's spec.template.spec.version if control plane is stable, other machine Pools are upgrading, concurrency limit reached", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{"upgrading-mp1", "upgrading-mp2"}, + upgradeConcurrency: 2, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + s := &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ + Version: tt.topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + Workers: &clusterv1.WorkersTopology{}, + }}, + Current: &scope.ClusterState{ + ControlPlane: &scope.ControlPlaneState{Object: controlPlaneObj}, + }, + UpgradeTracker: scope.NewUpgradeTracker(scope.MaxMPUpgradeConcurrency(tt.upgradeConcurrency)), + HookResponseTracker: scope.NewHookResponseTracker(), + } + if tt.afterControlPlaneUpgradeHookBlocking { + s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, &runtimehooksv1.AfterControlPlaneUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + RetryAfterSeconds: 10, + }, + }) + } + s.UpgradeTracker.ControlPlane.IsStartingUpgrade = tt.controlPlaneStartingUpgrade + s.UpgradeTracker.ControlPlane.IsUpgrading = tt.controlPlaneUpgrading + s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling + s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning + s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) + version := computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState) + g.Expect(version).To(Equal(tt.expectedVersion)) + + if tt.currentMachinePoolState != nil { + // Verify that if the upgrade is pending it is captured in the upgrade tracker. 
+ if tt.expectPendingUpgrade { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeTrue(), "MachinePool should be marked as pending upgrade") + } else { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeFalse(), "MachinePool should not be marked as pending upgrade") + } + } else { + // Verify that if the create is pending, it is captured in the upgrade tracker. + if tt.expectPendingCreate { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeTrue(), "MachinePool topology should be marked as pending create") + } else { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeFalse(), "MachinePool topology should not be marked as pending create") + } + } + }) + } +} + func TestIsMachineDeploymentDeferred(t *testing.T) { clusterTopology := &clusterv1.Topology{ Workers: &clusterv1.WorkersTopology{ @@ -1978,6 +2515,89 @@ func TestIsMachineDeploymentDeferred(t *testing.T) { } } +func TestIsMachinePoolDeferred(t *testing.T) { + clusterTopology := &clusterv1.Topology{ + Workers: &clusterv1.WorkersTopology{ + MachinePools: []clusterv1.MachinePoolTopology{ + { + Name: "mp-with-defer-upgrade", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + }, + }, + }, + { + Name: "mp-without-annotations", + }, + { + Name: "mp-with-hold-upgrade-sequence", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", + }, + }, + }, + { + Name: "mp-after-mp-with-hold-upgrade-sequence", + }, + }, + }, + } + + tests := []struct { + name string + mpTopology clusterv1.MachinePoolTopology + deferred bool + }{ + { + name: "MP with defer-upgrade annotation is deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-with-defer-upgrade", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + }, + }, + }, + deferred: true, + }, + { + name: "MP without annotations is not deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-without-annotations", + }, + deferred: false, + }, + { + name: "MP with hold-upgrade-sequence annotation is deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-with-hold-upgrade-sequence", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", + }, + }, + }, + deferred: true, + }, + { + name: "MP after mp with hold-upgrade-sequence is deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-after-mp-with-hold-upgrade-sequence", + }, + deferred: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(isMachinePoolDeferred(clusterTopology, tt.mpTopology)).To(Equal(tt.deferred)) + }) + } +} + func TestTemplateToObject(t *testing.T) { template := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infrastructureClusterTemplate"). WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}). @@ -2189,6 +2809,14 @@ func duplicateMachineDeploymentsState(s scope.MachineDeploymentsStateMap) scope. return n } +func duplicateMachinePoolsState(s scope.MachinePoolsStateMap) scope.MachinePoolsStateMap { + n := make(scope.MachinePoolsStateMap) + for k, v := range s { + n[k] = v + } + return n +} + func TestMergeMap(t *testing.T) { t.Run("Merge maps", func(t *testing.T) { g := NewWithT(t)