🌱 Enable integration tests of RuntimeExtensions #10330

Merged · 10 commits · Mar 28, 2024
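The diff below moves these tests into the topology package, swaps the `Reconciler` receiver for a `desiredStateEngine`, switches to the `clustershim` and `ownerrefs` helpers, and builds fake clients against an explicitly registered scheme. As a rough, non-authoritative sketch of that fake-client scaffolding (the helper name `newFakeClient` and the external test package are hypothetical; the scheme registration and `fake.NewClientBuilder().WithScheme(...)` calls mirror what the diff adds):

package topology_test

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// fakeScheme holds every API type the fake client needs to recognize.
var fakeScheme = runtime.NewScheme()

func init() {
	// Register built-in Kubernetes types plus the Cluster API types used by the tests.
	_ = clientgoscheme.AddToScheme(fakeScheme)
	_ = clusterv1.AddToScheme(fakeScheme)
}

// newFakeClient (hypothetical helper) seeds a fake client with the given objects.
// Passing the scheme explicitly is what lets non-core types such as Cluster be stored;
// without it the builder falls back to the default client-go scheme.
func newFakeClient(objs ...client.Object) client.Client {
	return fake.NewClientBuilder().
		WithScheme(fakeScheme).
		WithObjects(objs...).
		Build()
}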
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster
package topology

import (
"strings"
@@ -24,11 +24,15 @@ import (
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
utilfeature "k8s.io/component-base/featuregate/testing"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -42,10 +46,25 @@ import (
"sigs.k8s.io/cluster-api/internal/hooks"
fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
"sigs.k8s.io/cluster-api/internal/test/builder"
"sigs.k8s.io/cluster-api/internal/topology/clustershim"
"sigs.k8s.io/cluster-api/internal/topology/names"
"sigs.k8s.io/cluster-api/internal/topology/ownerrefs"
"sigs.k8s.io/cluster-api/util"
)

var (
ctx = ctrl.SetupSignalHandler()
fakeScheme = runtime.NewScheme()
)

func init() {
_ = clientgoscheme.AddToScheme(fakeScheme)
_ = clusterv1.AddToScheme(fakeScheme)
_ = apiextensionsv1.AddToScheme(fakeScheme)
_ = expv1.AddToScheme(fakeScheme)
_ = corev1.AddToScheme(fakeScheme)
}

var (
fakeRef1 = &corev1.ObjectReference{
Kind: "refKind1",
@@ -135,7 +154,7 @@ func TestComputeInfrastructureCluster(t *testing.T) {
})
t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) {
g := NewWithT(t)
shim := clusterShim(cluster)
shim := clustershim.New(cluster)

// current cluster objects for the test scenario
clusterWithInfrastructureRef := cluster.DeepCopy()
@@ -144,13 +163,13 @@ func TestComputeInfrastructureCluster(t *testing.T) {
// aggregating current cluster objects into ClusterState (simulating getCurrentState)
scope := scope.New(clusterWithInfrastructureRef)
scope.Current.InfrastructureCluster = infrastructureClusterTemplate.DeepCopy()
scope.Current.InfrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ownerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
scope.Current.InfrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
scope.Blueprint = blueprint

obj, err := computeInfrastructureCluster(ctx, scope)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())
g.Expect(hasOwnerReferenceFrom(obj, shim)).To(BeTrue())
g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
})
}

@@ -319,7 +338,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(cluster)
scope.Blueprint = blueprint

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
@@ -380,7 +399,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(cluster)
scope.Blueprint = blueprint

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
@@ -410,7 +429,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(clusterWithoutReplicas)
scope.Blueprint = blueprint

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
@@ -455,7 +474,7 @@ func TestComputeControlPlane(t *testing.T) {
s.Blueprint = blueprint
s.Current.ControlPlane = &scope.ControlPlaneState{}

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
g.Expect(err).ToNot(HaveOccurred())
@@ -516,7 +535,7 @@ func TestComputeControlPlane(t *testing.T) {
scope := scope.New(clusterWithControlPlaneRef)
scope.Blueprint = blueprint

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, scope, nil)
g.Expect(err).ToNot(HaveOccurred())
@@ -586,7 +605,7 @@ func TestComputeControlPlane(t *testing.T) {
Object: tt.currentControlPlane,
}

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, s, nil)
g.Expect(err).ToNot(HaveOccurred())
@@ -597,7 +616,7 @@ func TestComputeControlPlane(t *testing.T) {
})
t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) {
g := NewWithT(t)
shim := clusterShim(cluster)
shim := clustershim.New(cluster)

// current cluster objects
clusterWithoutReplicas := cluster.DeepCopy()
@@ -623,15 +642,15 @@ func TestComputeControlPlane(t *testing.T) {
}).
Build(),
}
s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
s.Blueprint = blueprint

r := &Reconciler{}
r := &desiredStateEngine{}

obj, err := r.computeControlPlane(ctx, s, nil)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(obj).ToNot(BeNil())
g.Expect(hasOwnerReferenceFrom(obj, shim)).To(BeTrue())
g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
})
}

@@ -862,11 +881,10 @@ func TestComputeControlPlaneVersion(t *testing.T) {
}).
Build()

fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build()
fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build()

r := &Reconciler{
r := &desiredStateEngine{
Client: fakeClient,
APIReader: fakeClient,
RuntimeClient: runtimeClient,
}
version, err := r.computeControlPlaneVersion(ctx, s)
@@ -1166,11 +1184,10 @@ func TestComputeControlPlaneVersion(t *testing.T) {
WithCatalog(catalog).
Build()

fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build()
fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(tt.s.Current.Cluster).Build()

r := &Reconciler{
r := &desiredStateEngine{
Client: fakeClient,
APIReader: fakeClient,
RuntimeClient: fakeRuntimeClient,
}

@@ -1243,11 +1260,10 @@ func TestComputeControlPlaneVersion(t *testing.T) {
}).
Build()

fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build()
fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build()

r := &Reconciler{
r := &desiredStateEngine{
Client: fakeClient,
APIReader: fakeClient,
RuntimeClient: runtimeClient,
}

@@ -1419,7 +1435,9 @@ func TestComputeMachineDeployment(t *testing.T) {
scope := scope.New(cluster)
scope.Blueprint = blueprint

actual, err := computeMachineDeployment(ctx, scope, mdTopology)
e := desiredStateEngine{}

actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
g.Expect(err).ToNot(HaveOccurred())

g.Expect(actual.BootstrapTemplate.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentNameLabel, "big-pool-of-machines"))
@@ -1488,7 +1506,9 @@ func TestComputeMachineDeployment(t *testing.T) {
// missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy
}

actual, err := computeMachineDeployment(ctx, scope, mdTopology)
e := desiredStateEngine{}

actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
g.Expect(err).ToNot(HaveOccurred())

// checking only values from CC defaults
@@ -1532,7 +1552,9 @@ func TestComputeMachineDeployment(t *testing.T) {
},
}

actual, err := computeMachineDeployment(ctx, s, mdTopology)
e := desiredStateEngine{}

actual, err := e.computeMachineDeployment(ctx, s, mdTopology)
g.Expect(err).ToNot(HaveOccurred())

actualMd := actual.Object
@@ -1580,7 +1602,9 @@ func TestComputeMachineDeployment(t *testing.T) {
Name: "big-pool-of-machines",
}

_, err := computeMachineDeployment(ctx, scope, mdTopology)
e := desiredStateEngine{}

_, err := e.computeMachineDeployment(ctx, scope, mdTopology)
g.Expect(err).To(HaveOccurred())
})

@@ -1692,7 +1716,10 @@ func TestComputeMachineDeployment(t *testing.T) {
Replicas: ptr.To[int32](2),
}
s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
obj, err := computeMachineDeployment(ctx, s, mdTopology)

e := desiredStateEngine{}

obj, err := e.computeMachineDeployment(ctx, s, mdTopology)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion))
})
@@ -1708,7 +1735,9 @@ func TestComputeMachineDeployment(t *testing.T) {
Name: "big-pool-of-machines",
}

actual, err := computeMachineDeployment(ctx, scope, mdTopology)
e := desiredStateEngine{}

actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
g.Expect(err).ToNot(HaveOccurred())
// Check that the ClusterName and selector are set properly for the MachineHealthCheck.
g.Expect(actual.MachineHealthCheck.Spec.ClusterName).To(Equal(cluster.Name))
@@ -1817,7 +1846,9 @@ func TestComputeMachinePool(t *testing.T) {
scope := scope.New(cluster)
scope.Blueprint = blueprint

actual, err := computeMachinePool(ctx, scope, mpTopology)
e := desiredStateEngine{}

actual, err := e.computeMachinePool(ctx, scope, mpTopology)
g.Expect(err).ToNot(HaveOccurred())

g.Expect(actual.BootstrapObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines"))
@@ -1880,7 +1911,9 @@ func TestComputeMachinePool(t *testing.T) {
// missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy
}

actual, err := computeMachinePool(ctx, scope, mpTopology)
e := desiredStateEngine{}

actual, err := e.computeMachinePool(ctx, scope, mpTopology)
g.Expect(err).ToNot(HaveOccurred())

// checking only values from CC defaults
@@ -1923,7 +1956,9 @@ func TestComputeMachinePool(t *testing.T) {
},
}

actual, err := computeMachinePool(ctx, s, mpTopology)
e := desiredStateEngine{}

actual, err := e.computeMachinePool(ctx, s, mpTopology)
g.Expect(err).ToNot(HaveOccurred())

actualMp := actual.Object
@@ -1966,7 +2001,9 @@ func TestComputeMachinePool(t *testing.T) {
Name: "big-pool-of-machines",
}

_, err := computeMachinePool(ctx, scope, mpTopology)
e := desiredStateEngine{}

_, err := e.computeMachinePool(ctx, scope, mpTopology)
g.Expect(err).To(HaveOccurred())
})

@@ -2076,7 +2113,10 @@ func TestComputeMachinePool(t *testing.T) {
Replicas: ptr.To[int32](2),
}
s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
obj, err := computeMachinePool(ctx, s, mpTopology)

e := desiredStateEngine{}

obj, err := e.computeMachinePool(ctx, s, mpTopology)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion))
})
@@ -2240,7 +2280,10 @@ func TestComputeMachineDeploymentVersion(t *testing.T) {
s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling
s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
version := computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)

e := desiredStateEngine{}

version := e.computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)
g.Expect(version).To(Equal(tt.expectedVersion))

if tt.currentMachineDeploymentState != nil {
@@ -2418,7 +2461,10 @@ func TestComputeMachinePoolVersion(t *testing.T) {
s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling
s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
version := computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)

e := desiredStateEngine{}

version := e.computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
g.Expect(version).To(Equal(tt.expectedVersion))

if tt.currentMachinePoolState != nil {
@@ -2889,7 +2935,7 @@ func Test_computeMachineHealthCheck(t *testing.T) {
clusterv1.ClusterTopologyOwnedLabel: "",
},
OwnerReferences: []metav1.OwnerReference{
*ownerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")),
*ownerrefs.OwnerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")),
},
},
Spec: clusterv1.MachineHealthCheckSpec{