diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go
index 349373f264d6..7bc480ca9458 100644
--- a/test/e2e/autoscaler.go
+++ b/test/e2e/autoscaler.go
@@ -92,7 +92,7 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput)
 Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -211,6 +211,6 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput)
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index 0e649381de5b..fd4156e9be29 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -130,7 +130,7 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust
 }
 // Setup a Namespace where to host objects for this spec and create a watcher for the Namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -267,6 +267,6 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust
 AfterEach(func() {
 // Dumps all the resources in the spec Namespace, then cleanups the cluster object and the spec Namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index 209361bbbec4..a8290992e107 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -134,7 +134,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 }
 // Set up a Namespace where to host objects for this spec and create a watcher for the Namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -286,7 +286,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 }, 10*time.Second, 1*time.Second).Should(Succeed(), "delete extensionConfig failed")
 // Dumps all the resources in the spec Namespace, then cleanups the cluster object and the spec Namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go
index 45d4700dd3d5..f919a701ec92 100644
--- a/test/e2e/clusterclass_changes.go
+++ b/test/e2e/clusterclass_changes.go
@@ -154,7 +154,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 Expect(input.ModifyControlPlaneFields).ToNot(BeEmpty(), "Invalid argument. input.ModifyControlPlaneFields can't be empty when calling %s spec", specName)
 // Set up a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -232,7 +232,7 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go
index 6d6a37289498..e9c1c3238429 100644
--- a/test/e2e/clusterclass_rollout.go
+++ b/test/e2e/clusterclass_rollout.go
@@ -112,7 +112,7 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
 // Set up a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -291,7 +291,7 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index ca3dfbdd3943..af4f8475d821 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -246,7 +246,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ managementClusterNamespace, managementClusterCancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 managementClusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -645,7 +645,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 AfterEach(func() {
 if testNamespace != nil {
 // Dump all the logs from the workload cluster before deleting them.
- dumpAllResources(ctx, managementClusterProxy, input.ArtifactFolder, testNamespace, managementClusterResources.Cluster)
+ framework.DumpAllResourcesAndLogs(ctx, managementClusterProxy, input.ArtifactFolder, testNamespace, managementClusterResources.Cluster)
 if !input.SkipCleanup {
 Byf("Deleting all clusters in namespace %s in management cluster %s", testNamespace.Name, managementClusterName)
@@ -675,7 +675,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 input.PreCleanupManagementCluster(managementClusterProxy)
 }
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 0fa27c475197..5af9772bbcc8 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -19,21 +19,12 @@ package e2e
 import (
 "context"
 "fmt"
- "path/filepath"
 "github.com/blang/semver/v4"
 . "github.com/onsi/ginkgo/v2"
 "github.com/onsi/gomega/types"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog/v2"
- "sigs.k8s.io/controller-runtime/pkg/client"
-
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/test/e2e/internal/log"
- "sigs.k8s.io/cluster-api/test/framework"
+
 "sigs.k8s.io/cluster-api/test/framework/clusterctl"
- "sigs.k8s.io/cluster-api/util"
 )
 // Test suite constants for e2e config variables.
@@ -57,88 +48,6 @@ func Byf(format string, a ...interface{}) {
 By(fmt.Sprintf(format, a...))
 }
-func setupSpecNamespace(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, postNamespaceCreated func(framework.ClusterProxy, string)) (*corev1.Namespace, context.CancelFunc) {
- Byf("Creating a namespace for hosting the %q test spec", specName)
- namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
- Creator: clusterProxy.GetClient(),
- ClientSet: clusterProxy.GetClientSet(),
- Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
- LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()),
- })
-
- if postNamespaceCreated != nil {
- log.Logf("Calling postNamespaceCreated for namespace %s", namespace.Name)
- postNamespaceCreated(clusterProxy, namespace.Name)
- }
-
- return namespace, cancelWatches
-}
-
-// dumpAllResources dumps all the resources in the spec namespace and the workload cluster.
-func dumpAllResources(ctx context.Context, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cluster *clusterv1.Cluster) {
- Byf("Dumping logs from the %q workload cluster", cluster.Name)
-
- // Dump all the logs from the workload cluster.
- clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name))
-
- Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)
-
- // Dump all Cluster API related resources to artifacts.
- framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
- Lister: clusterProxy.GetClient(),
- Namespace: namespace.Name,
- LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
- })
-
- // If the cluster still exists, dump pods and nodes of the workload cluster.
- if err := clusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(cluster), &clusterv1.Cluster{}); err == nil {
- Byf("Dumping Pods and Nodes of Cluster %s", klog.KObj(cluster))
- framework.DumpResourcesForCluster(ctx, framework.DumpResourcesForClusterInput{
- Lister: clusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name).GetClient(),
- Cluster: cluster,
- LogPath: filepath.Join(artifactFolder, "clusters", cluster.Name, "resources"),
- Resources: []framework.DumpNamespaceAndGVK{
- {
- GVK: schema.GroupVersionKind{
- Version: corev1.SchemeGroupVersion.Version,
- Kind: "Pod",
- },
- },
- {
- GVK: schema.GroupVersionKind{
- Version: corev1.SchemeGroupVersion.Version,
- Kind: "Node",
- },
- },
- },
- })
- }
-}
-
-// dumpSpecResourcesAndCleanup dumps all the resources in the spec namespace and cleans up the spec namespace.
-func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
- // Dump all the resources in the spec namespace and the workload cluster.
- dumpAllResources(ctx, clusterProxy, artifactFolder, namespace, cluster)
-
- if !skipCleanup {
- Byf("Deleting cluster %s", klog.KObj(cluster))
- // While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance
- // that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait
- // instead of DeleteClusterAndWait
- framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
- Client: clusterProxy.GetClient(),
- Namespace: namespace.Name,
- }, intervalsGetter(specName, "wait-delete-cluster")...)
-
- Byf("Deleting namespace used for hosting the %q test spec", specName)
- framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
- Deleter: clusterProxy.GetClient(),
- Name: namespace.Name,
- })
- }
- cancelWatches()
-}
-
 // HaveValidVersion succeeds if version is a valid semver version.
 func HaveValidVersion(version string) types.GomegaMatcher {
 return &validVersionMatcher{version: version}
diff --git a/test/e2e/k8s_conformance.go b/test/e2e/k8s_conformance.go
index 5b768ca6a57e..44b45122541a 100644
--- a/test/e2e/k8s_conformance.go
+++ b/test/e2e/k8s_conformance.go
@@ -87,7 +87,7 @@ func K8SConformanceSpec(ctx context.Context, inputGetter func() K8SConformanceSp
 Expect(kubetestConfigFilePath).To(BeAnExistingFile(), "%s should be a valid kubetest config file")
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -158,6 +158,6 @@ func K8SConformanceSpec(ctx context.Context, inputGetter func() K8SConformanceSp
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go
index 6ed2bbee756f..44b9fe1e2a07 100644
--- a/test/e2e/kcp_adoption.go
+++ b/test/e2e/kcp_adoption.go
@@ -102,7 +102,7 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
 Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 })
 It("Should adopt up-to-date control plane Machines without modification", func() {
@@ -253,6 +253,6 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/kcp_remediations.go b/test/e2e/kcp_remediations.go
index bce6470697d1..0f93c58c367f 100644
--- a/test/e2e/kcp_remediations.go
+++ b/test/e2e/kcp_remediations.go
@@ -107,7 +107,7 @@ func KCPRemediationSpec(ctx context.Context, inputGetter func() KCPRemediationSp
 Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 })
 It("Should replace unhealthy machines", func() {
@@ -418,7 +418,7 @@ func KCPRemediationSpec(ctx context.Context, inputGetter func() KCPRemediationSp
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/machine_pool.go b/test/e2e/machine_pool.go
index 56b90c9260db..a35d028db2fd 100644
--- a/test/e2e/machine_pool.go
+++ b/test/e2e/machine_pool.go
@@ -81,7 +81,7 @@ func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) {
 Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -144,6 +144,6 @@ func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) {
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/md_remediations.go b/test/e2e/md_remediations.go
index 11d44ed190a4..1f9f484e43db 100644
--- a/test/e2e/md_remediations.go
+++ b/test/e2e/md_remediations.go
@@ -80,7 +80,7 @@ func MachineDeploymentRemediationSpec(ctx context.Context, inputGetter func() Ma
 Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -125,6 +125,6 @@ func MachineDeploymentRemediationSpec(ctx context.Context, inputGetter func() Ma
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/md_rollout.go b/test/e2e/md_rollout.go
index b0f1ec383cc3..e1060e8a11c5 100644
--- a/test/e2e/md_rollout.go
+++ b/test/e2e/md_rollout.go
@@ -75,7 +75,7 @@ func MachineDeploymentRolloutSpec(ctx context.Context, inputGetter func() Machin
 Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -125,6 +125,6 @@ func MachineDeploymentRolloutSpec(ctx context.Context, inputGetter func() Machin
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/md_scale.go b/test/e2e/md_scale.go
index 994d2a500f7b..205bfa06a740 100644
--- a/test/e2e/md_scale.go
+++ b/test/e2e/md_scale.go
@@ -75,7 +75,7 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD
 Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion)))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -128,6 +128,6 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/node_drain_timeout.go b/test/e2e/node_drain_timeout.go
index 0b4e0a36c646..51337e860431 100644
--- a/test/e2e/node_drain_timeout.go
+++ b/test/e2e/node_drain_timeout.go
@@ -87,7 +87,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
 Expect(input.E2EConfig.GetIntervals(specName, "wait-machine-deleted")).ToNot(BeNil())
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -169,7 +169,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go
index 7caebd3201b0..14d17c5d80ce 100644
--- a/test/e2e/quick_start.go
+++ b/test/e2e/quick_start.go
@@ -99,7 +99,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 })
@@ -159,6 +159,6 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
 AfterEach(func() {
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go
index 7f0455842085..40cf9bc065e9 100644
--- a/test/e2e/self_hosted.go
+++ b/test/e2e/self_hosted.go
@@ -132,7 +132,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 }
 // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
- namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
+ namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
 clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
 if input.ControlPlaneMachineCount == nil {
@@ -412,7 +412,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 AfterEach(func() {
 if selfHostedNamespace != nil {
 // Dump all Cluster API related resources to artifacts before pivoting back.
- dumpAllResources(ctx, selfHostedClusterProxy, input.ArtifactFolder, namespace, clusterResources.Cluster)
+ framework.DumpAllResourcesAndLogs(ctx, selfHostedClusterProxy, input.ArtifactFolder, namespace, clusterResources.Cluster)
 }
 if selfHostedCluster != nil {
 By("Ensure API servers are stable before doing move")
@@ -449,7 +449,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 }
 // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+ framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
 })
 }
diff --git a/test/framework/spec_helpers.go b/test/framework/spec_helpers.go
new file mode 100644
index 000000000000..9a1891dec448
--- /dev/null
+++ b/test/framework/spec_helpers.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+
+ . "github.com/onsi/ginkgo/v2"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/klog/v2"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/test/framework/internal/log"
+ "sigs.k8s.io/cluster-api/util"
+)
+
+func byf(format string, a ...interface{}) {
+ By(fmt.Sprintf(format, a...))
+}
+
+// SetupSpecNamespace creates a namespace for the test spec and setups a watch for the namespace events.
+func SetupSpecNamespace(ctx context.Context, specName string, clusterProxy ClusterProxy, artifactFolder string, postNamespaceCreated func(ClusterProxy, string)) (*corev1.Namespace, context.CancelFunc) {
+ byf("Creating a namespace for hosting the %q test spec", specName)
+ namespace, cancelWatches := CreateNamespaceAndWatchEvents(ctx, CreateNamespaceAndWatchEventsInput{
+ Creator: clusterProxy.GetClient(),
+ ClientSet: clusterProxy.GetClientSet(),
+ Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+ LogFolder: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName()),
+ })
+
+ if postNamespaceCreated != nil {
+ log.Logf("Calling postNamespaceCreated for namespace %s", namespace.Name)
+ postNamespaceCreated(clusterProxy, namespace.Name)
+ }
+
+ return namespace, cancelWatches
+}
+
+// DumpAllResourcesAndLogs dumps all the resources in the spec namespace and the workload cluster.
+func DumpAllResourcesAndLogs(ctx context.Context, clusterProxy ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cluster *clusterv1.Cluster) {
+ byf("Dumping logs from the %q workload cluster", cluster.Name)
+
+ // Dump all the logs from the workload cluster.
+ clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name))
+
+ byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)
+
+ // Dump all Cluster API related resources to artifacts.
+ DumpAllResources(ctx, DumpAllResourcesInput{
+ Lister: clusterProxy.GetClient(),
+ Namespace: namespace.Name,
+ LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"),
+ })
+
+ // If the cluster still exists, dump pods and nodes of the workload cluster.
+ if err := clusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(cluster), &clusterv1.Cluster{}); err == nil {
+ byf("Dumping Pods and Nodes of Cluster %s", klog.KObj(cluster))
+ DumpResourcesForCluster(ctx, DumpResourcesForClusterInput{
+ Lister: clusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name).GetClient(),
+ Cluster: cluster,
+ LogPath: filepath.Join(artifactFolder, "clusters", cluster.Name, "resources"),
+ Resources: []DumpNamespaceAndGVK{
+ {
+ GVK: schema.GroupVersionKind{
+ Version: corev1.SchemeGroupVersion.Version,
+ Kind: "Pod",
+ },
+ },
+ {
+ GVK: schema.GroupVersionKind{
+ Version: corev1.SchemeGroupVersion.Version,
+ Kind: "Node",
+ },
+ },
+ },
+ })
+ }
+}
+
+// DumpSpecResourcesAndCleanup dumps all the resources in the spec namespace and cleans up the spec namespace.
+func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []interface{}, skipCleanup bool) {
+ // Dump all the resources in the spec namespace and the workload cluster.
+ DumpAllResourcesAndLogs(ctx, clusterProxy, artifactFolder, namespace, cluster)
+
+ if !skipCleanup {
+ byf("Deleting cluster %s", klog.KObj(cluster))
+ // While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance
+ // that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait
+ // instead of DeleteClusterAndWait
+ DeleteAllClustersAndWait(ctx, DeleteAllClustersAndWaitInput{
+ Client: clusterProxy.GetClient(),
+ Namespace: namespace.Name,
+ }, intervalsGetter(specName, "wait-delete-cluster")...)
+
+ byf("Deleting namespace used for hosting the %q test spec", specName)
+ DeleteNamespace(ctx, DeleteNamespaceInput{
+ Deleter: clusterProxy.GetClient(),
+ Name: namespace.Name,
+ })
+ }
+ cancelWatches()
+}
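
Note for downstream consumers: with this change SetupSpecNamespace, DumpAllResourcesAndLogs, and DumpSpecResourcesAndCleanup are exported from the framework package, so provider e2e suites can call them directly instead of carrying private copies. The sketch below shows how such a spec might be wired; the spec name, wrapper function, and parameter plumbing are illustrative assumptions, while the framework calls mirror the signatures introduced above. DumpAllResourcesAndLogs can likewise be called on its own when a spec needs to dump a cluster before deleting it, as the clusterctl upgrade and self-hosted specs do.

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// ExampleSpec is a hypothetical provider spec showing how the exported helpers are consumed.
func ExampleSpec(ctx context.Context, proxy framework.ClusterProxy, e2eConfig *clusterctl.E2EConfig, artifactFolder string, skipCleanup bool) {
	var (
		specName         = "example" // placeholder spec name
		namespace        *corev1.Namespace
		cancelWatches    context.CancelFunc
		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
	)

	BeforeEach(func() {
		// Creates the spec namespace and starts watching its events; nil means no postNamespaceCreated hook.
		namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, proxy, artifactFolder, nil)
		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
	})

	AfterEach(func() {
		// Dumps resources and logs, deletes the cluster and the namespace (unless skipCleanup), then cancels the watches.
		framework.DumpSpecResourcesAndCleanup(ctx, specName, proxy, artifactFolder, namespace, cancelWatches, clusterResources.Cluster, e2eConfig.GetIntervals, skipCleanup)
	})
}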