From caaa74482b51fae777334cd7a29595da1c06481e Mon Sep 17 00:00:00 2001 From: Yuvaraj Kakaraparthi Date: Sat, 10 Jun 2023 00:20:13 -0700 Subject: [PATCH] add scale e2e --- .gitignore | 2 +- Makefile | 3 + test/e2e/autoscaler_test.go | 1 + test/e2e/cluster_upgrade.go | 14 +- test/e2e/cluster_upgrade_runtimesdk.go | 14 +- test/e2e/cluster_upgrade_runtimesdk_test.go | 11 +- test/e2e/cluster_upgrade_test.go | 48 +- test/e2e/clusterclass_changes.go | 12 +- test/e2e/clusterclass_changes_test.go | 14 +- test/e2e/clusterclass_rollout.go | 13 +- test/e2e/clusterclass_rollout_test.go | 14 +- test/e2e/clusterctl_upgrade.go | 15 +- test/e2e/clusterctl_upgrade_test.go | 43 +- test/e2e/config/docker.yaml | 16 + .../main/bases/cluster-with-topology.yaml | 22 + .../main/cluster-template/kustomization.yaml | 3 + .../main/clusterclass-in-memory.yaml | 107 ++++ test/e2e/k8s_conformance.go | 19 +- test/e2e/k8s_conformance_test.go | 13 +- test/e2e/kcp_adoption.go | 14 +- test/e2e/kcp_adoption_test.go | 13 +- test/e2e/kcp_remediations.go | 45 +- test/e2e/kcp_remediations_test.go | 13 +- test/e2e/machine_pool.go | 13 +- test/e2e/machine_pool_test.go | 12 +- test/e2e/md_remediations.go | 13 +- test/e2e/md_remediations_test.go | 12 +- test/e2e/md_rollout.go | 13 +- test/e2e/md_rollout_test.go | 12 +- test/e2e/md_scale.go | 14 +- test/e2e/md_scale_test.go | 12 +- test/e2e/node_drain_timeout.go | 13 +- test/e2e/node_drain_timeout_test.go | 12 +- test/e2e/quick_start_test.go | 76 +-- test/e2e/scale.go | 497 ++++++++++++++++++ test/e2e/scale_test.go | 42 ++ test/e2e/self_hosted.go | 13 +- test/e2e/self_hosted_test.go | 14 +- test/framework/cluster_helpers.go | 4 +- .../clusterctl/clusterctl_helpers.go | 99 +++- 40 files changed, 1145 insertions(+), 195 deletions(-) create mode 100644 test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml create mode 100644 test/e2e/data/infrastructure-inmemory/main/cluster-template/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml create mode 100644 test/e2e/scale.go create mode 100644 test/e2e/scale_test.go diff --git a/.gitignore b/.gitignore index 5839ce26b0c7..95c74ae21c08 100644 --- a/.gitignore +++ b/.gitignore @@ -12,7 +12,7 @@ hack/tools/bin # E2E test templates test/e2e/data/infrastructure-docker/**/cluster-template*.yaml - +test/e2e/data/infrastructure-inmemory/**/cluster-template*.yaml # Output of Makefile targets using sed on MacOS systems *.yaml-e diff --git a/Makefile b/Makefile index 49af0c4802e7..b9c6a8e553dd 100644 --- a/Makefile +++ b/Makefile @@ -505,6 +505,7 @@ generate-modules: ## Run go mod tidy to ensure modules are up to date generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v0.4 v1.0 v1.3 v1.4 main) ## Generate cluster templates for all versions DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker +INMEMORY_TEMPLATES := test/e2e/data/infrastructure-inmemory .PHONY: generate-e2e-templates-v0.4 generate-e2e-templates-v0.4: $(KUSTOMIZE) @@ -546,6 +547,8 @@ generate-e2e-templates-main: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-topology.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-ignition.yaml + $(KUSTOMIZE) build $(INMEMORY_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > 
$(INMEMORY_TEMPLATES)/main/cluster-template.yaml + .PHONY: generate-metrics-config generate-metrics-config: $(ENVSUBST_BIN) ## Generate ./hack/observability/kube-state-metrics/crd-config.yaml OUTPUT_FILE="${OBSERVABILITY_DIR}/kube-state-metrics/crd-config.yaml"; \ diff --git a/test/e2e/autoscaler_test.go b/test/e2e/autoscaler_test.go index e2d5a2ee3060..a85b53bb9538 100644 --- a/test/e2e/autoscaler_test.go +++ b/test/e2e/autoscaler_test.go @@ -32,6 +32,7 @@ var _ = Describe("When using the autoscaler with Cluster API using ClusterClass BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), InfrastructureMachineTemplateKind: "dockermachinetemplates", Flavor: pointer.String("topology-autoscaler"), AutoscalerVersion: "v1.26.2", diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index 9971cac69069..cb4615ac5ffc 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -43,6 +43,13 @@ type ClusterUpgradeConformanceSpecInput struct { SkipConformanceTests bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test. // Default is 1. ControlPlaneMachineCount *int64 @@ -118,13 +125,18 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust It("Should create and upgrade a workload cluster and eventually run kubetest", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: pointer.StringDeref(input.Flavor, "upgrades"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index 4353d7578cbd..7cd817c60f68 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -62,6 +62,13 @@ type clusterUpgradeWithRuntimeSDKSpecInput struct { ArtifactFolder string SkipCleanup bool + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. 
+ InfrastructureProvider *string + // ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test. // Default is 1. ControlPlaneMachineCount *int64 @@ -150,13 +157,18 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl Namespace: namespace.Name, } + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: pointer.StringDeref(input.Flavor, "upgrades"), Namespace: namespace.Name, ClusterName: clusterName, diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go index 464de8f7d79b..5596311b3b12 100644 --- a/test/e2e/cluster_upgrade_runtimesdk_test.go +++ b/test/e2e/cluster_upgrade_runtimesdk_test.go @@ -35,11 +35,12 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with Runt } return clusterUpgradeWithRuntimeSDKSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), // "upgrades" is the same as the "topology" flavor but with an additional MachinePool. 
Flavor: pointer.String("upgrades-runtimesdk"), } diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index 31d177f53f51..22927542bbac 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -52,12 +52,13 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass and testi } return ClusterUpgradeConformanceSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: flavor, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: flavor, + InfrastructureProvider: pointer.String("docker"), } }) }) @@ -65,12 +66,13 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass and testi var _ = Describe("When upgrading a workload cluster using ClusterClass [ClusterClass]", func() { ClusterUpgradeConformanceSpec(ctx, func() ClusterUpgradeConformanceSpecInput { return ClusterUpgradeConformanceSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("topology"), + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), + Flavor: pointer.String("topology"), // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. ControlPlaneMachineCount: pointer.Int64(1), @@ -83,11 +85,12 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass [ClusterC var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA control plane [ClusterClass]", func() { ClusterUpgradeConformanceSpec(ctx, func() ClusterUpgradeConformanceSpecInput { return ClusterUpgradeConformanceSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. 
SkipConformanceTests: true, @@ -101,11 +104,12 @@ var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA var _ = Describe("When upgrading a workload cluster using ClusterClass with a HA control plane using scale-in rollout [ClusterClass]", func() { ClusterUpgradeConformanceSpec(ctx, func() ClusterUpgradeConformanceSpecInput { return ClusterUpgradeConformanceSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), // This test is run in CI in parallel with other tests. To keep the test duration reasonable // the conformance tests are skipped. SkipConformanceTests: true, diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go index c94161a259cc..a0cad1b83227 100644 --- a/test/e2e/clusterclass_changes.go +++ b/test/e2e/clusterclass_changes.go @@ -51,6 +51,12 @@ type ClusterClassChangesSpecInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string // Flavor is the cluster-template flavor used to create the Cluster for testing. // NOTE: The template must be using a ClusterClass. Flavor string @@ -131,13 +137,17 @@ func ClusterClassChangesSpec(ctx context.Context, inputGetter func() ClusterClas It("Should successfully rollout the managed topology upon changes to the ClusterClass", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.Flavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/clusterclass_changes_test.go b/test/e2e/clusterclass_changes_test.go index 3bca5b2d49ec..5d8fbab9a606 100644 --- a/test/e2e/clusterclass_changes_test.go +++ b/test/e2e/clusterclass_changes_test.go @@ -21,17 +21,19 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing ClusterClass changes [ClusterClass]", func() { ClusterClassChangesSpec(ctx, func() ClusterClassChangesSpecInput { return ClusterClassChangesSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: "topology", + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), + Flavor: "topology", // ModifyControlPlaneFields are the ControlPlane fields which will be set on the // ControlPlaneTemplate of the ClusterClass after the initial Cluster creation. // The test verifies that these fields are rolled out to the ControlPlane. diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go index 4a57e438f194..6c43f755e9f7 100644 --- a/test/e2e/clusterclass_rollout.go +++ b/test/e2e/clusterclass_rollout.go @@ -58,6 +58,13 @@ type ClusterClassRolloutSpecInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // Flavor is the cluster-template flavor used to create the Cluster for testing. // NOTE: The template must be using ClusterClass, KCP and CABPK as this test is specifically // testing ClusterClass and KCP rollout behavior. @@ -105,13 +112,17 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas It("Should successfully rollout the managed topology upon changes to the ClusterClass", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.Flavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/clusterclass_rollout_test.go b/test/e2e/clusterclass_rollout_test.go index 87192345e398..be124137d572 100644 --- a/test/e2e/clusterclass_rollout_test.go +++ b/test/e2e/clusterclass_rollout_test.go @@ -21,17 +21,19 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing ClusterClass rollouts [ClusterClass]", func() { ClusterClassRolloutSpec(ctx, func() ClusterClassRolloutSpecInput { return ClusterClassRolloutSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: "topology", + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: "topology", + InfrastructureProvider: pointer.String("docker"), } }) }) diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go index 7ae4538ebf41..65ad7b15c2f2 100644 --- a/test/e2e/clusterctl_upgrade.go +++ b/test/e2e/clusterctl_upgrade.go @@ -95,6 +95,13 @@ type ClusterctlUpgradeSpecInput struct { // UpgradeClusterctlVariables can be used to set additional variables for clusterctl upgrade. UpgradeClusterctlVariables map[string]string SkipCleanup bool + + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string // PreWaitForCluster is a function that can be used as a hook to apply extra resources (that cannot be part of the template) in the generated namespace hosting the cluster // This function is called after applying the cluster template and before waiting for the cluster resources. PreWaitForCluster func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string, workloadClusterName string) @@ -216,6 +223,10 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg By("Creating a workload cluster to be used as a new management cluster") // NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades. 
// So we are creating a workload cluster that will be used as a new management cluster where to install older version of providers + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } managementClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -223,7 +234,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.MgmtFlavor, Namespace: managementClusterNamespace.Name, ClusterName: managementClusterName, @@ -362,7 +373,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg KubernetesVersion: kubernetesVersion, ControlPlaneMachineCount: controlPlaneMachineCount, WorkerMachineCount: workerMachineCount, - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, // setup clusterctl logs folder LogFolder: filepath.Join(input.ArtifactFolder, "clusters", managementClusterProxy.GetName()), }) diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 47127cea0f23..db46b885ac38 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -21,6 +21,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" "sigs.k8s.io/cluster-api/test/framework" ) @@ -33,6 +34,7 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>current)", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.4.8/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1alpha4", InitWithKubernetesVersion: "v1.23.17", @@ -57,12 +59,13 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>current)", func() { var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() { ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { return ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.5/clusterctl-{OS}-{ARCH}", + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.0.5/clusterctl-{OS}-{ARCH}", // We have to pin the providers because with `InitWithProvidersContract` the test would // use the latest version for the contract (which is v1.3.X for v1beta1). 
InitWithCoreProvider: "cluster-api:v1.0.5", @@ -95,12 +98,13 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() { var _ = Describe("When testing clusterctl upgrades (v1.3=>current)", func() { ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { return ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.6/clusterctl-{OS}-{ARCH}", + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.6/clusterctl-{OS}-{ARCH}", // We have to pin the providers because with `InitWithProvidersContract` the test would // use the latest version for the contract (which is v1.4.X for v1beta1). InitWithCoreProvider: "cluster-api:v1.3.6", @@ -134,12 +138,13 @@ var _ = Describe("When testing clusterctl upgrades (v1.3=>current)", func() { var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.3=>current) [ClusterClass]", func() { ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput { return ClusterctlUpgradeSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.6/clusterctl-{OS}-{ARCH}", + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.3.6/clusterctl-{OS}-{ARCH}", // We have to pin the providers because with `InitWithProvidersContract` the test would // use the latest version for the contract (which is v1.4.X for v1beta1). 
InitWithCoreProvider: "cluster-api:v1.3.6", @@ -178,6 +183,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1beta1", InitWithKubernetesVersion: "v1.27.1", @@ -207,6 +213,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>cur BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.4.0/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1beta1", InitWithKubernetesVersion: "v1.27.1", diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml index 40e3fdece360..3f3f383a8701 100644 --- a/test/e2e/config/docker.yaml +++ b/test/e2e/config/docker.yaml @@ -19,6 +19,8 @@ images: loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capd-manager-{ARCH}:dev loadBehavior: tryLoad +- name: gcr.io/k8s-staging-cluster-api/capim-manager-{ARCH}:dev + loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/test-extension-{ARCH}:dev loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-cainjector:v1.12.1 @@ -246,6 +248,20 @@ providers: - sourcePath: "../data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml" - sourcePath: "../data/shared/main/metadata.yaml" +- name: in-memory + type: InfrastructureProvider + versions: + - name: v1.5.99 # next; use manifest from source files + value: ../../../test/infrastructure/inmemory/config/default + replacements: + - old: --metrics-bind-addr=localhost:8080 + new: "--metrics-bind-addr=:8080\n - --logging-format=json" + files: + # Add cluster templates + - sourcePath: "../data/infrastructure-inmemory/main/clusterclass-in-memory.yaml" + - sourcePath: "../data/infrastructure-inmemory/main/cluster-template.yaml" + - sourcePath: "../data/shared/main/metadata.yaml" + - name: test-extension type: RuntimeExtensionProvider versions: diff --git a/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml new file mode 100644 index 000000000000..61fe1299299d --- /dev/null +++ b/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml @@ -0,0 +1,22 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + clusterNetwork: + services: + cidrBlocks: ${SERVICE_CIDR:=["10.128.0.0/12"]} + pods: + cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} + serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} + topology: + class: in-memory + version: ${KUBERNETES_VERSION} + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + workers: + machineDeployments: + - class: default-worker + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} diff --git a/test/e2e/data/infrastructure-inmemory/main/cluster-template/kustomization.yaml b/test/e2e/data/infrastructure-inmemory/main/cluster-template/kustomization.yaml new file mode 100644 index 000000000000..eca48686e8e2 --- /dev/null +++ b/test/e2e/data/infrastructure-inmemory/main/cluster-template/kustomization.yaml @@ -0,0 +1,3 @@ +bases: +- ../bases/cluster-with-topology.yaml + diff --git 
a/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml new file mode 100644 index 000000000000..9064a6e254b2 --- /dev/null +++ b/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml @@ -0,0 +1,107 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: in-memory +spec: + controlPlane: + metadata: + annotations: + # The in-memory provider currently does not support looking up coredns + # and kube-proxy information and leads to reconcile errors in KCP. + # With these annotations KCP will skip processing those steps. + controlplane.cluster.x-k8s.io/skip-coredns: "" + controlplane.cluster.x-k8s.io/skip-kube-proxy: "" + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: InMemoryMachineTemplate + name: in-memory-control-plane + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: in-memory-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: InMemoryClusterTemplate + name: in-memory-cluster + workers: + machineDeployments: + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: in-memory-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: InMemoryMachineTemplate + name: in-memory-default-worker-machinetemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: InMemoryClusterTemplate +metadata: + name: in-memory-cluster +spec: + template: + spec: {} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: in-memory-control-plane +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + - host.docker.internal + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + initConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + joinConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: InMemoryMachineTemplate +metadata: + name: in-memory-control-plane +spec: + template: + spec: {} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: InMemoryMachineTemplate +metadata: + name: in-memory-default-worker-machinetemplate +spec: + template: + spec: {} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: in-memory-default-worker-bootstraptemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% \ No newline at end of file diff --git a/test/e2e/k8s_conformance.go b/test/e2e/k8s_conformance.go index 675c73745e55..41ab6ee4ce44 100644 --- a/test/e2e/k8s_conformance.go +++ b/test/e2e/k8s_conformance.go @@ -40,8 +40,16 @@ type K8SConformanceSpecInput struct { BootstrapClusterProxy framework.ClusterProxy ArtifactFolder string SkipCleanup bool - Flavor string - 
ControlPlaneWaiters clusterctl.ControlPlaneWaiters + + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + + Flavor string + ControlPlaneWaiters clusterctl.ControlPlaneWaiters } // K8SConformanceSpec implements a spec that creates a cluster and runs Kubernetes conformance suite. @@ -79,6 +87,11 @@ func K8SConformanceSpec(ctx context.Context, inputGetter func() K8SConformanceSp It("Should create a workload cluster and run kubetest", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } + // NOTE: The number of CP nodes does not have relevance for conformance; instead, the number of workers allows // better parallelism of tests and thus a lower execution time. var workerMachineCount int64 = 5 @@ -89,7 +102,7 @@ func K8SConformanceSpec(ctx context.Context, inputGetter func() K8SConformanceSp LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.Flavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/k8s_conformance_test.go b/test/e2e/k8s_conformance_test.go index 1a06d1ee900e..8725b50ecc8d 100644 --- a/test/e2e/k8s_conformance_test.go +++ b/test/e2e/k8s_conformance_test.go @@ -21,16 +21,17 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing K8S conformance [Conformance]", func() { K8SConformanceSpec(ctx, func() K8SConformanceSpecInput { return K8SConformanceSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - } + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker")} }) }) diff --git a/test/e2e/kcp_adoption.go b/test/e2e/kcp_adoption.go index e71af91149b5..be673e30fc44 100644 --- a/test/e2e/kcp_adoption.go +++ b/test/e2e/kcp_adoption.go @@ -48,6 +48,13 @@ type KCPAdoptionSpecInput struct { ArtifactFolder string SkipCleanup bool + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // Flavor, if specified, must refer to a template that is // specially crafted with individual control plane machines // and a KubeadmControlPlane resource configured for adoption. 
@@ -102,6 +109,11 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu WaitForClusterIntervals := input.E2EConfig.GetIntervals(specName, "wait-cluster") WaitForControlPlaneIntervals := input.E2EConfig.GetIntervals(specName, "wait-control-plane") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } + workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ // pass reference to the management cluster hosting this test KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), @@ -113,7 +125,7 @@ func KCPAdoptionSpec(ctx context.Context, inputGetter func() KCPAdoptionSpecInpu Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, ControlPlaneMachineCount: replicas, WorkerMachineCount: pointer.Int64(0), // setup clusterctl logs folder diff --git a/test/e2e/kcp_adoption_test.go b/test/e2e/kcp_adoption_test.go index 7b82f64add84..2db1e8e5093d 100644 --- a/test/e2e/kcp_adoption_test.go +++ b/test/e2e/kcp_adoption_test.go @@ -21,16 +21,17 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing KCP adoption", func() { KCPAdoptionSpec(ctx, func() KCPAdoptionSpecInput { return KCPAdoptionSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - } + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker")} }) }) diff --git a/test/e2e/kcp_remediations.go b/test/e2e/kcp_remediations.go index d2b6e3363c91..fae495f93f15 100644 --- a/test/e2e/kcp_remediations.go +++ b/test/e2e/kcp_remediations.go @@ -61,6 +61,13 @@ type KCPRemediationSpecInput struct { ArtifactFolder string SkipCleanup bool + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // Flavor, if specified, must refer to a template that has a MachineHealthCheck // - 3 node CP, no workers // - Control plane machines having a pre-kubeadm command that queries for a well-known ConfigMap on the management cluster, @@ -109,12 +116,13 @@ func KCPRemediationSpec(ctx context.Context, inputGetter func() KCPRemediationSp // Creates the workload cluster. 
clusterResources = createWorkloadClusterAndWait(ctx, createWorkloadClusterAndWaitInput{ - E2EConfig: input.E2EConfig, - ClusterctlConfigPath: input.ClusterctlConfigPath, - Proxy: input.BootstrapClusterProxy, - ArtifactFolder: input.ArtifactFolder, - SpecName: specName, - Flavor: pointer.StringDeref(input.Flavor, "kcp-remediation"), + E2EConfig: input.E2EConfig, + ClusterctlConfigPath: input.ClusterctlConfigPath, + Proxy: input.BootstrapClusterProxy, + ArtifactFolder: input.ArtifactFolder, + SpecName: specName, + Flavor: pointer.StringDeref(input.Flavor, "kcp-remediation"), + InfrastructureProvider: input.InfrastructureProvider, // values to be injected in the template @@ -424,15 +432,16 @@ func createConfigMapForMachinesBootstrapSignal(ctx context.Context, writer clien } type createWorkloadClusterAndWaitInput struct { - E2EConfig *clusterctl.E2EConfig - ClusterctlConfigPath string - Proxy framework.ClusterProxy - ArtifactFolder string - SpecName string - Flavor string - Namespace string - AuthenticationToken string - ServerAddr string + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + Proxy framework.ClusterProxy + ArtifactFolder string + SpecName string + Flavor string + Namespace string + AuthenticationToken string + ServerAddr string + InfrastructureProvider *string } // createWorkloadClusterAndWait creates a workload cluster and return as soon as the cluster infrastructure is ready. @@ -444,6 +453,10 @@ func createWorkloadClusterAndWait(ctx context.Context, input createWorkloadClust // gets the cluster template log.Logf("Getting the cluster template yaml") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterName := fmt.Sprintf("%s-%s", input.SpecName, util.RandomString(6)) workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ // pass the clusterctl config file that points to the local provider repository created for this test, @@ -459,7 +472,7 @@ func createWorkloadClusterAndWait(ctx context.Context, input createWorkloadClust KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), ControlPlaneMachineCount: pointer.Int64(3), WorkerMachineCount: pointer.Int64(0), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, // setup clusterctl logs folder LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.Proxy.GetName()), // Adds authenticationToken, server address and namespace variables to be injected in the cluster template. diff --git a/test/e2e/kcp_remediations_test.go b/test/e2e/kcp_remediations_test.go index cdd9fd50efc4..9b9a4869b3b7 100644 --- a/test/e2e/kcp_remediations_test.go +++ b/test/e2e/kcp_remediations_test.go @@ -21,16 +21,17 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing KCP remediation", func() { KCPRemediationSpec(ctx, func() KCPRemediationSpecInput { return KCPRemediationSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - } + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker")} }) }) diff --git a/test/e2e/machine_pool.go b/test/e2e/machine_pool.go index 24c074a1ce70..e72ec388c6c8 100644 --- a/test/e2e/machine_pool.go +++ b/test/e2e/machine_pool.go @@ -44,6 +44,13 @@ type MachinePoolInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // Flavor, if specified must refer to a template that contains a MachinePool resource. // If not specified, "machine-pool" is used Flavor *string @@ -77,13 +84,17 @@ func MachinePoolSpec(ctx context.Context, inputGetter func() MachinePoolInput) { It("Should successfully create a cluster with machine pool machines", func() { By("Creating a workload cluster") workerMachineCount := int32(2) + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: pointer.StringDeref(input.Flavor, "machine-pool"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/machine_pool_test.go b/test/e2e/machine_pool_test.go index c903c57e589c..99a1f1d62e47 100644 --- a/test/e2e/machine_pool_test.go +++ b/test/e2e/machine_pool_test.go @@ -21,16 +21,18 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing MachinePools", func() { MachinePoolSpec(ctx, func() MachinePoolInput { return MachinePoolInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), } }) }) diff --git a/test/e2e/md_remediations.go b/test/e2e/md_remediations.go index 7d95a032518f..3d9be2eeafe4 100644 --- a/test/e2e/md_remediations.go +++ b/test/e2e/md_remediations.go @@ -41,6 +41,13 @@ type MachineDeploymentRemediationSpecInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // Flavor, if specified, must refer to a template that has a MachineHealthCheck // resource configured to match the MachineDeployment managed Machines and be // configured to treat "e2e.remediation.condition" "False" as an unhealthy @@ -76,13 +83,17 @@ func MachineDeploymentRemediationSpec(ctx context.Context, inputGetter func() Ma It("Should replace unhealthy machines", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: pointer.StringDeref(input.Flavor, "md-remediation"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/md_remediations_test.go b/test/e2e/md_remediations_test.go index c35b60938d82..458b77f38cfa 100644 --- a/test/e2e/md_remediations_test.go +++ b/test/e2e/md_remediations_test.go @@ -21,16 +21,18 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing MachineDeployment remediation", func() { MachineDeploymentRemediationSpec(ctx, func() MachineDeploymentRemediationSpecInput { return MachineDeploymentRemediationSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), } }) }) diff --git a/test/e2e/md_rollout.go b/test/e2e/md_rollout.go index 1fb3fa43ed74..46b8d6f38aa9 100644 --- a/test/e2e/md_rollout.go +++ b/test/e2e/md_rollout.go @@ -41,6 +41,13 @@ type MachineDeploymentRolloutSpecInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters Flavor string + + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string } // MachineDeploymentRolloutSpec implements a test that verifies that MachineDeployment rolling updates are successful. @@ -70,13 +77,17 @@ func MachineDeploymentRolloutSpec(ctx context.Context, inputGetter func() Machin It("Should successfully upgrade Machines upon changes in relevant MachineDeployment fields", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.Flavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/md_rollout_test.go b/test/e2e/md_rollout_test.go index b8ceb49ec155..6d30413fd665 100644 --- a/test/e2e/md_rollout_test.go +++ b/test/e2e/md_rollout_test.go @@ -21,16 +21,18 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing MachineDeployment rolling upgrades", func() { MachineDeploymentRolloutSpec(ctx, func() MachineDeploymentRolloutSpecInput { return MachineDeploymentRolloutSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), } }) }) diff --git a/test/e2e/md_scale.go b/test/e2e/md_scale.go index 6ab8a81c91d0..0c3d92505967 100644 --- a/test/e2e/md_scale.go +++ b/test/e2e/md_scale.go @@ -41,6 +41,13 @@ type MachineDeploymentScaleSpecInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters Flavor string + + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string } // MachineDeploymentScaleSpec implements a test that verifies that MachineDeployment scale operations are successful. @@ -70,14 +77,17 @@ func MachineDeploymentScaleSpec(ctx context.Context, inputGetter func() MachineD It("Should successfully scale a MachineDeployment up and down upon changes to the MachineDeployment replica count", func() { By("Creating a workload cluster") - + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.Flavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/md_scale_test.go b/test/e2e/md_scale_test.go index ec493d105be7..e60fa686e59c 100644 --- a/test/e2e/md_scale_test.go +++ b/test/e2e/md_scale_test.go @@ -21,16 +21,18 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing MachineDeployment scale out/in", func() { MachineDeploymentScaleSpec(ctx, func() MachineDeploymentScaleSpecInput { return MachineDeploymentScaleSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), } }) }) diff --git a/test/e2e/node_drain_timeout.go b/test/e2e/node_drain_timeout.go index 879a49ab14ac..c2b80d987d3b 100644 --- a/test/e2e/node_drain_timeout.go +++ b/test/e2e/node_drain_timeout.go @@ -45,6 +45,13 @@ type NodeDrainTimeoutSpecInput struct { SkipCleanup bool ControlPlaneWaiters clusterctl.ControlPlaneWaiters + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // Flavor, if specified, must refer to a template that contains // a KubeadmControlPlane resource with spec.machineTemplate.nodeDrainTimeout // configured and a MachineDeployment resource that has @@ -82,6 +89,10 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo It("A node should be forcefully removed if it cannot be drained in time", func() { By("Creating a workload cluster") + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } controlPlaneReplicas := 3 clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -89,7 +100,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() NodeDrainTimeo LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: pointer.StringDeref(input.Flavor, "node-drain"), Namespace: namespace.Name, ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), diff --git a/test/e2e/node_drain_timeout_test.go b/test/e2e/node_drain_timeout_test.go index 9d74efe50953..4bc661f75ea7 100644 --- a/test/e2e/node_drain_timeout_test.go +++ b/test/e2e/node_drain_timeout_test.go @@ -21,16 +21,18 @@ package e2e import ( . 
"github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" ) var _ = Describe("When testing node drain timeout", func() { NodeDrainTimeoutSpec(ctx, func() NodeDrainTimeoutSpecInput { return NodeDrainTimeoutSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), } }) }) diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index 17370f6254db..91b59ef90078 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -31,11 +31,12 @@ import ( var _ = Describe("When following the Cluster API quick-start", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, @@ -54,12 +55,13 @@ var _ = Describe("When following the Cluster API quick-start", func() { var _ = Describe("When following the Cluster API quick-start with ClusterClass [PR-Blocking] [ClusterClass]", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("topology"), + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("topology"), + InfrastructureProvider: pointer.String("docker"), // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. 
PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, @@ -79,12 +81,13 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ var _ = Describe("When following the Cluster API quick-start with IPv6 [IPv6]", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("ipv6"), + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("ipv6"), + InfrastructureProvider: pointer.String("docker"), } }) }) @@ -92,12 +95,13 @@ var _ = Describe("When following the Cluster API quick-start with IPv6 [IPv6]", var _ = Describe("When following the Cluster API quick-start with Ignition", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("ignition"), + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("ignition"), + InfrastructureProvider: pointer.String("docker"), } }) }) @@ -105,12 +109,13 @@ var _ = Describe("When following the Cluster API quick-start with Ignition", fun var _ = Describe("When following the Cluster API quick-start with dualstack and ipv4 primary [IPv6]", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("topology-dualstack-ipv4-primary"), + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("topology-dualstack-ipv4-primary"), + InfrastructureProvider: pointer.String("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { By("Running kubetest dualstack tests") // Start running the dualstack test suite from kubetest. 
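Note (illustrative, not part of the patch): the specs above resolve the infrastructure provider with an explicit nil check before falling back to the clusterctl default. Assuming the same k8s.io/utils/pointer and test framework packages already used in this change, the repeated pattern could also be written as a one-line helper; resolveInfrastructureProvider below is a hypothetical name, not something the patch adds.
package e2e

import (
	"k8s.io/utils/pointer"

	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// resolveInfrastructureProvider returns the provider requested by the spec input and falls back to
// clusterctl.DefaultInfrastructureProvider when no override is set, mirroring the
// "if input.InfrastructureProvider != nil" blocks repeated throughout the specs.
func resolveInfrastructureProvider(override *string) string {
	return pointer.StringDeref(override, clusterctl.DefaultInfrastructureProvider)
}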
@@ -130,12 +135,13 @@ var _ = Describe("When following the Cluster API quick-start with dualstack and var _ = Describe("When following the Cluster API quick-start with dualstack and ipv6 primary [IPv6]", func() { QuickStartSpec(ctx, func() QuickStartSpecInput { return QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("topology-dualstack-ipv6-primary"), + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("topology-dualstack-ipv6-primary"), + InfrastructureProvider: pointer.String("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { By("Running kubetest dualstack tests") // Start running the dualstack test suite from kubetest. diff --git a/test/e2e/scale.go b/test/e2e/scale.go new file mode 100644 index 000000000000..26e1ff35196a --- /dev/null +++ b/test/e2e/scale.go @@ -0,0 +1,497 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "sync" + + . "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/test/e2e/internal/log" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util/yaml" +) + +const ( + scaleClusterCount = "CAPI_SCALE_CLUSTER_COUNT" + scaleConcurrency = "CAPI_SCALE_CONCURRENCY" + scaleControlPlaneMachineCount = "CAPI_SCALE_CONTROL_PLANE_MACHINE_COUNT" + scaleWorkerMachineCount = "CAPI_SCALE_WORKER_MACHINE_COUNT" +) + +// scaleSpecInput is the input for scaleSpec. +type scaleSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + + // Flavor, if specified is the template flavor used to create the cluster for testing. + // If not specified, the default flavor for the selected infrastructure provider is used. 
+ Flavor *string + + // ClusterCount is the number of target workload clusters. + // If unspecified, defaults to 10. + // Can be overridden by variable CAPI_SCALE_CLUSTER_COUNT. + ClusterCount *int64 + + // Concurrency is the maximum concurrency of each of the scale operations. + // If unspecified, it defaults to 5. + // Can be overridden by variable CAPI_SCALE_CONCURRENCY. + Concurrency *int64 + + // ControlPlaneMachineCount defines the number of control plane machines to be added to each workload cluster. + // If not specified, 1 will be used. + // Can be overridden by variable CAPI_SCALE_CONTROL_PLANE_MACHINE_COUNT. + ControlPlaneMachineCount *int64 + + // WorkerMachineCount defines the number of worker machines to be added to each workload cluster. + // If not specified, 1 will be used. + // Can be overridden by variable CAPI_SCALE_WORKER_MACHINE_COUNT. + WorkerMachineCount *int64 + + // FailFast, if set to true, will return immediately after the first cluster operation fails. + // If set to false, the test suite will not exit immediately after the first cluster operation fails. + // Example: When creating clusters from c1 to c20, suppose c6 fails creation. If FailFast is set to true, + // the suite will exit immediately after receiving the c6 creation error. If set to false, cluster creations + // of the other clusters will continue and all the errors are collected before the test exits. + // Note: The test suite will still fail since c6 creation failed; FailFast only determines + // whether the suite fails as soon as c6 fails or only after all cluster creations are done. + FailFast bool +} + +// scaleSpec implements a scale test. +func scaleSpec(ctx context.Context, inputGetter func() scaleSpecInput) { + var ( + specName = "scale" + input scaleSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + + // Set up a Namespace to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should create and delete workload clusters", func() { + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } + + flavor := clusterctl.DefaultFlavor + if input.Flavor != nil { + flavor = *input.Flavor + } + + controlPlaneMachineCount := pointer.Int64(1) + if input.ControlPlaneMachineCount != nil { + controlPlaneMachineCount = input.ControlPlaneMachineCount + } + // If the variable is defined, it takes precedence.
+ if input.E2EConfig.HasVariable(scaleControlPlaneMachineCount) { + controlPlaneMachineCountStr := input.E2EConfig.GetVariable(scaleControlPlaneMachineCount) + controlPlaneMachineCountInt, err := strconv.Atoi(controlPlaneMachineCountStr) + Expect(err).NotTo(HaveOccurred()) + controlPlaneMachineCount = pointer.Int64(int64(controlPlaneMachineCountInt)) + } + + workerMachineCount := pointer.Int64(1) + if input.WorkerMachineCount != nil { + workerMachineCount = input.WorkerMachineCount + } + // If the variable is defined, it takes precedence. + if input.E2EConfig.HasVariable(scaleWorkerMachineCount) { + workerMachineCountStr := input.E2EConfig.GetVariable(scaleWorkerMachineCount) + workerMachineCountInt, err := strconv.Atoi(workerMachineCountStr) + Expect(err).NotTo(HaveOccurred()) + workerMachineCount = pointer.Int64(int64(workerMachineCountInt)) + } + + clusterCount := int64(10) + if input.ClusterCount != nil { + clusterCount = *input.ClusterCount + } + // If the variable is defined, it takes precedence. + if input.E2EConfig.HasVariable(scaleClusterCount) { + clusterCountStr := input.E2EConfig.GetVariable(scaleClusterCount) + var err error + clusterCount, err = strconv.ParseInt(clusterCountStr, 10, 64) + Expect(err).NotTo(HaveOccurred(), "%q value should be integer", scaleClusterCount) + } + + concurrency := int64(5) + if input.Concurrency != nil { + concurrency = *input.Concurrency + } + // If the variable is defined, it takes precedence. + if input.E2EConfig.HasVariable(scaleConcurrency) { + concurrencyStr := input.E2EConfig.GetVariable(scaleConcurrency) + var err error + concurrency, err = strconv.ParseInt(concurrencyStr, 10, 64) + Expect(err).NotTo(HaveOccurred(), "%q value should be integer", scaleConcurrency) + } + + // TODO(ykakarap): Follow-up: Add support for legacy cluster templates. + + By("Create the ClusterClass to be used by all workload clusters") + + // IMPORTANT: The ConfigCluster function in the test framework is currently not concurrency safe. + // Therefore, it is not advised to call this function from all the concurrency workers. + // To avoid this problem, we chose to run ConfigCluster once and reuse its output across all the workers. + log.Logf("Generating YAML for base Cluster and ClusterClass") + baseClusterName := fmt.Sprintf("%s-base", specName) + baseWorkloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: infrastructureProvider, + Flavor: flavor, + Namespace: namespace.Name, + ClusterName: baseClusterName, + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), + ControlPlaneMachineCount: controlPlaneMachineCount, + WorkerMachineCount: workerMachineCount, + }) + Expect(baseWorkloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template") + + // Separate the Cluster YAML and the ClusterClass YAML so that we can apply the ClusterClass ahead of time + // to avoid race conditions while applying the ClusterClass when trying to create multiple clusters concurrently. + // Nb. The Apply function in the test framework uses `kubectl apply` internally. `kubectl apply` detects + // if the resource has to be created or updated before actually executing the operation. If another worker changes + // the status of the cluster during this timeframe, the operation will fail.
+ log.Logf("Extract ClusterClass and Cluster from template YAML") + clusterClassYAML, baseClusterTemplateYAML := extractClusterClassAndClusterFromTemplate(baseWorkloadClusterTemplate) + + // Apply the ClusterClass. + log.Logf("Create ClusterClass") + Eventually(func() error { + return input.BootstrapClusterProxy.Apply(ctx, clusterClassYAML, "-n", namespace.Name) + }).Should(Succeed()) + + By("Create workload clusters concurrently") + // Create multiple clusters concurrently from the same base cluster template. + + clusterNames := make([]string, 0, clusterCount) + for i := int64(1); i <= clusterCount; i++ { + name := fmt.Sprintf("%s-%d", specName, i) + clusterNames = append(clusterNames, name) + } + + clusterCreateResults, err := workConcurrentlyAndWait(ctx, workConcurrentlyAndWaitInput{ + ClusterNames: clusterNames, + Concurrency: concurrency, + FailFast: input.FailFast, + WorkerFunc: func(ctx context.Context, inputChan chan string, resultChan chan workResult, wg *sync.WaitGroup) { + createClusterAndWaitWorker(ctx, inputChan, resultChan, wg, baseClusterTemplateYAML, baseClusterName, clusterctl.ApplyCustomClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Namespace: namespace.Name, + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + }, + }) + if err != nil { + // Call Fail to notify ginkgo that the suit has failed. + // Ginkgo will print the first observed error failure in this case. + // Example: If cluster c1, c2 and c3 failed then ginkgo will only print the first + // observed failure among the these 3 clusters. + // Since ginkgo only captures one failure, to help with this we are logging the error + // that will contain the full stack trace of failure for each cluster to help with debugging. + // TODO(ykakarap): Follow-up: Explore options for improved error reporting. + log.Logf("Failed to create clusters. Error: %s", err.Error()) + Fail("") + } + + // TODO(ykakarap): Follow-up: Dump resources for the failed clusters (creation). + + clusterNamesToDelete := []string{} + for _, result := range clusterCreateResults { + clusterNamesToDelete = append(clusterNamesToDelete, result.clusterName) + } + + By("Delete the workload clusters concurrently") + // Now delete all the workload clusters. + _, err = workConcurrentlyAndWait(ctx, workConcurrentlyAndWaitInput{ + ClusterNames: clusterNamesToDelete, + Concurrency: concurrency, + FailFast: input.FailFast, + WorkerFunc: func(ctx context.Context, inputChan chan string, resultChan chan workResult, wg *sync.WaitGroup) { + deleteClusterAndWaitWorker(ctx, inputChan, resultChan, wg, input.BootstrapClusterProxy.GetClient(), namespace.Name) + }, + }) + if err != nil { + // Call Fail to notify ginkgo that the suit has failed. + // Ginkgo will print the first observed error failure in this case. + // Example: If cluster c1, c2 and c3 failed then ginkgo will only print the first + // observed failure among the these 3 clusters. + // Since ginkgo only captures one failure, to help with this we are logging the error + // that will contain the full stack trace of failure for each cluster to help with debugging. + // TODO(ykakarap): Follow-up: Explore options for improved error reporting. + log.Logf("Failed to delete clusters. 
Error: %s", err.Error()) + Fail("") + } + + // TODO(ykakarap): Follow-up: Dump resources for the failed clusters (deletion). + + By("PASSED!") + }) + + AfterEach(func() { + cancelWatches() + }) +} + +func extractClusterClassAndClusterFromTemplate(rawYAML []byte) ([]byte, []byte) { + objs, err := yaml.ToUnstructured(rawYAML) + Expect(err).NotTo(HaveOccurred()) + clusterObj := unstructured.Unstructured{} + clusterClassAndTemplates := []unstructured.Unstructured{} + for _, obj := range objs { + if obj.GroupVersionKind().GroupKind() == clusterv1.GroupVersion.WithKind("Cluster").GroupKind() { + clusterObj = obj + } else { + clusterClassAndTemplates = append(clusterClassAndTemplates, obj) + } + } + clusterYAML, err := yaml.FromUnstructured([]unstructured.Unstructured{clusterObj}) + Expect(err).NotTo(HaveOccurred()) + clusterClassYAML, err := yaml.FromUnstructured(clusterClassAndTemplates) + Expect(err).NotTo(HaveOccurred()) + return clusterClassYAML, clusterYAML +} + +type workConcurrentlyAndWaitInput struct { + // ClusterNames is the names of clusters to work on. + ClusterNames []string + + // Concurrency is the maximum number of clusters to be created concurrently. + // NB. This also includes waiting for the clusters to be up and running. + // Example: If the concurrency is 2. It would create 2 clusters concurrently and wait + // till at least one of the clusters is up and running before it starts creating another + // cluster. + Concurrency int64 + + FailFast bool + + WorkerFunc func(ctx context.Context, inputChan chan string, errChan chan workResult, wg *sync.WaitGroup) +} + +func workConcurrentlyAndWait(ctx context.Context, input workConcurrentlyAndWaitInput) ([]workResult, error) { + Expect(ctx).NotTo(BeNil(), "ctx is required for workConcurrentlyAndWait") + Expect(input.Concurrency).To(BeNumerically(">", 0), "Invalid argument. input.Concurrency should be greater that 0") + + // Start a channel. This channel will be used to coordinate work with the workers. + // The channel is used to communicate the name of the cluster. + // Adding a new name to the channel implies that a new cluster of the given names needs to be processed. + inputChan := make(chan string) + wg := &sync.WaitGroup{} + doneChan := make(chan bool) + resultChan := make(chan workResult) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Start the workers. + for i := int64(0); i < input.Concurrency; i++ { + wg.Add(1) + go input.WorkerFunc(ctx, inputChan, resultChan, wg) + } + + // Adding the cluster names into the input channel. + go func() { + for _, name := range input.ClusterNames { + inputChan <- name + } + // All the clusters are requested. + // Close the channel to shut down workers as they become unused. + close(inputChan) + }() + + go func() { + // Wait for all the workers to shut down. + wg.Wait() + close(doneChan) + }() + + results := []workResult{} + +outer: + for { + select { + case result := <-resultChan: + results = append(results, result) + if result.err != nil && input.FailFast { + cancel() + } + case <-doneChan: + break outer + } + } + + // Clean up. All the workers are shut down. + // Close the result channel. 
+ close(resultChan) + + errs := []error{} + for _, result := range results { + if result.err != nil { + if e, ok := result.err.(types.GinkgoError); ok { + errs = append(errs, errors.Errorf("[clusterName: %q] Stack trace: \n %s", result.clusterName, e.CodeLocation.FullStackTrace)) + } else { + errs = append(errs, errors.Errorf("[clusterName: %q] Error: %v", result.clusterName, result.err)) + } + } + } + + return results, kerrors.NewAggregate(errs) +} + +func createClusterAndWaitWorker(ctx context.Context, inputChan <-chan string, resultChan chan<- workResult, wg *sync.WaitGroup, baseTemplate []byte, baseClusterName string, input clusterctl.ApplyCustomClusterTemplateAndWaitInput) { + defer wg.Done() + + for { + done := func() bool { + select { + case <-ctx.Done(): + // If the context is cancelled, return and shutdown the worker. + return true + case clusterName, open := <-inputChan: + // Read the cluster name from the channel. + // If the channel is closed it implies there is no more work to be done. Return. + if !open { + return true + } + log.Logf("Creating cluster %s", clusterName) + + // This defer will catch ginkgo failures and record them. + // The recorded panics are then handled by the parent goroutine. + defer func() { + e := recover() + resultChan <- workResult{ + clusterName: clusterName, + err: e, + } + }() + + // Create the cluster template YAML with the target cluster name. + clusterTemplateYAML := bytes.Replace(baseTemplate, []byte(baseClusterName), []byte(clusterName), -1) + // Nb. Input is passed as a copy therefore we can safely update the value here, and it won't affect other + // workers. + input.CustomTemplateYAML = clusterTemplateYAML + input.ClusterName = clusterName + + clusterResources := &clusterctl.ApplyCustomClusterTemplateAndWaitResult{} + clusterctl.ApplyCustomClusterTemplateAndWait(ctx, input, clusterResources) + return false + } + }() + if done { + break + } + } +} + +func deleteClusterAndWaitWorker(ctx context.Context, inputChan <-chan string, resultChan chan<- workResult, wg *sync.WaitGroup, c client.Client, namespace string) { + defer wg.Done() + + for { + done := func() bool { + select { + case <-ctx.Done(): + // If the context is cancelled, return and shutdown the worker. + return true + case clusterName, open := <-inputChan: + // Read the cluster name from the channel. + // If the channel is closed it implies there is not more work to be done. Return. + if !open { + return true + } + log.Logf("Deleting cluster %s", clusterName) + + // This defer will catch ginkgo failures and record them. + // The recorded panics are then handled by the parent goroutine. + defer func() { + e := recover() + resultChan <- workResult{ + clusterName: clusterName, + err: e, + } + }() + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + } + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: c, + Cluster: cluster, + }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + Getter: c, + Cluster: cluster, + }) + return false + } + }() + if done { + break + } + } +} + +type workResult struct { + clusterName string + err any +} diff --git a/test/e2e/scale_test.go b/test/e2e/scale_test.go new file mode 100644 index 000000000000..5bc343c271e2 --- /dev/null +++ b/test/e2e/scale_test.go @@ -0,0 +1,42 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + . "github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" +) + +var _ = Describe("When scale testing using in-memory provider [Scale]", func() { + scaleSpec(ctx, func() scaleSpecInput { + return scaleSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + InfrastructureProvider: pointer.String("in-memory"), + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + ClusterCount: pointer.Int64(10), + Concurrency: pointer.Int64(5), + Flavor: pointer.String(""), + ControlPlaneMachineCount: pointer.Int64(3), + WorkerMachineCount: pointer.Int64(3), + } + }) +}) diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go index 7bde5a23dc42..18d5d10b948a 100644 --- a/test/e2e/self_hosted.go +++ b/test/e2e/self_hosted.go @@ -48,6 +48,13 @@ type SelfHostedSpecInput struct { ControlPlaneWaiters clusterctl.ControlPlaneWaiters Flavor string + // InfrastructureProviders specifies the infrastructure to use for clusterctl + // operations (Example: get cluster templates). + // Note: In most cases this need not be specified. It only needs to be specified when + // multiple infrastructure providers (ex: CAPD + in-memory) are installed on the cluster as clusterctl will not be + // able to identify the default. + InfrastructureProvider *string + // SkipUpgrade skip the upgrade of the self-hosted clusters kubernetes version. // If true, the variable KUBERNETES_VERSION is expected to be set. 
// If false, the variables KUBERNETES_VERSION_UPGRADE_FROM, KUBERNETES_VERSION_UPGRADE_TO, @@ -147,13 +154,17 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) clusterctlVariables["DOCKER_PRELOAD_IMAGES"] = `[` + strings.Join(images, ",") + `]` } + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), ClusterctlConfigPath: input.ClusterctlConfigPath, KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + InfrastructureProvider: infrastructureProvider, Flavor: input.Flavor, Namespace: namespace.Name, ClusterName: workloadClusterName, diff --git a/test/e2e/self_hosted_test.go b/test/e2e/self_hosted_test.go index d6d16717c782..11ee79afa660 100644 --- a/test/e2e/self_hosted_test.go +++ b/test/e2e/self_hosted_test.go @@ -27,11 +27,12 @@ import ( var _ = Describe("When testing Cluster API working on self-hosted clusters", func() { SelfHostedSpec(ctx, func() SelfHostedSpecInput { return SelfHostedSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + InfrastructureProvider: pointer.String("docker"), } }) }) @@ -45,6 +46,7 @@ var _ = Describe("When testing Cluster API working on self-hosted clusters using ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology", + InfrastructureProvider: pointer.String("docker"), ControlPlaneMachineCount: pointer.Int64(1), WorkerMachineCount: pointer.Int64(1), } @@ -60,6 +62,7 @@ var _ = Describe("When testing Cluster API working on self-hosted clusters using ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology", + InfrastructureProvider: pointer.String("docker"), ControlPlaneMachineCount: pointer.Int64(3), WorkerMachineCount: pointer.Int64(1), } @@ -75,6 +78,7 @@ var _ = Describe("When testing Cluster API working on single-node self-hosted cl ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, Flavor: "topology-single-node-cluster", + InfrastructureProvider: pointer.String("docker"), ControlPlaneMachineCount: pointer.Int64(1), WorkerMachineCount: pointer.Int64(0), } diff --git a/test/framework/cluster_helpers.go b/test/framework/cluster_helpers.go index 0024274ec7e4..3fc9bed8c413 100644 --- a/test/framework/cluster_helpers.go +++ b/test/framework/cluster_helpers.go @@ -153,7 +153,7 @@ type DeleteClusterInput struct { // DeleteCluster deletes the cluster. func DeleteCluster(ctx context.Context, input DeleteClusterInput) { - Byf("Deleting cluster %s", input.Cluster.GetName()) + Byf("Deleting cluster %s", klog.KObj(input.Cluster)) Expect(input.Deleter.Delete(ctx, input.Cluster)).To(Succeed()) } @@ -165,7 +165,7 @@ type WaitForClusterDeletedInput struct { // WaitForClusterDeleted waits until the cluster object has been deleted. 
func WaitForClusterDeleted(ctx context.Context, input WaitForClusterDeletedInput, intervals ...interface{}) { - Byf("Waiting for cluster %s to be deleted", input.Cluster.GetName()) + Byf("Waiting for cluster %s to be deleted", klog.KObj(input.Cluster)) Eventually(func() bool { cluster := &clusterv1.Cluster{} key := client.ObjectKey{ diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go index 00f5e8af2278..c87faffed852 100644 --- a/test/framework/clusterctl/clusterctl_helpers.go +++ b/test/framework/clusterctl/clusterctl_helpers.go @@ -24,6 +24,7 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/config" @@ -219,7 +220,7 @@ type ApplyClusterTemplateAndWaitInput struct { } // Waiter is a function that runs and waits for a long-running operation to finish and updates the result. -type Waiter func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) +type Waiter func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) // ControlPlaneWaiters are Waiter functions for the control plane. type ControlPlaneWaiters struct { @@ -270,7 +271,6 @@ func (r *ApplyClusterTemplateAndWaitResult) ExpectedTotalNodes() int32 { // ApplyClusterTemplateAndWait gets a cluster template using clusterctl, and waits for the cluster to be ready. // Important! this method assumes the cluster uses a KubeadmControlPlane and MachineDeployments. func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) { - setDefaults(&input) Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyClusterTemplateAndWait") Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait") Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyClusterTemplateAndWait") @@ -309,39 +309,100 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate }) Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template") - log.Logf("Applying the cluster template yaml to the cluster") + ApplyCustomClusterTemplateAndWait(ctx, ApplyCustomClusterTemplateAndWaitInput{ + ClusterProxy: input.ClusterProxy, + CustomTemplateYAML: workloadClusterTemplate, + ClusterName: input.ConfigCluster.ClusterName, + Namespace: input.ConfigCluster.Namespace, + CNIManifestPath: input.CNIManifestPath, + WaitForClusterIntervals: input.WaitForClusterIntervals, + WaitForControlPlaneIntervals: input.WaitForControlPlaneIntervals, + WaitForMachineDeployments: input.WaitForMachineDeployments, + WaitForMachinePools: input.WaitForMachinePools, + Args: input.Args, + PreWaitForCluster: input.PreWaitForCluster, + PostMachinesProvisioned: input.PostMachinesProvisioned, + ControlPlaneWaiters: input.ControlPlaneWaiters, + }, (*ApplyCustomClusterTemplateAndWaitResult)(result)) +} + +// ApplyCustomClusterTemplateAndWaitInput is the input type for ApplyCustomClusterTemplateAndWait. 
+type ApplyCustomClusterTemplateAndWaitInput struct { + ClusterProxy framework.ClusterProxy + CustomTemplateYAML []byte + ClusterName string + Namespace string + CNIManifestPath string + WaitForClusterIntervals []interface{} + WaitForControlPlaneIntervals []interface{} + WaitForMachineDeployments []interface{} + WaitForMachinePools []interface{} + Args []string // extra args to be used during `kubectl apply` + PreWaitForCluster func() + PostMachinesProvisioned func() + ControlPlaneWaiters +} + +type ApplyCustomClusterTemplateAndWaitResult struct { + ClusterClass *clusterv1.ClusterClass + Cluster *clusterv1.Cluster + ControlPlane *controlplanev1.KubeadmControlPlane + MachineDeployments []*clusterv1.MachineDeployment + MachinePools []*expv1.MachinePool +} + +func ApplyCustomClusterTemplateAndWait(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) { + setDefaults(&input) + Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyCustomClusterTemplateAndWait") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyCustomClusterTemplateAndWait") + Expect(input.CustomTemplateYAML).NotTo(BeEmpty(), "Invalid argument. input.CustomTemplateYAML can't be empty when calling ApplyCustomClusterTemplateAndWait") + Expect(input.ClusterName).NotTo(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling ApplyCustomClusterTemplateAndWait") + Expect(input.Namespace).NotTo(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling ApplyCustomClusterTemplateAndWait") + Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyCustomClusterTemplateAndWait") + + log.Logf("Creating the workload cluster with name %q from the provided yaml", input.ClusterName) + + // Ensure we have a Cluster for dump and cleanup steps in AfterEach even if ApplyCustomClusterTemplateAndWait fails. + result.Cluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: input.ClusterName, + Namespace: input.Namespace, + }, + } + + log.Logf("Applying the cluster template yaml of cluster %s", klog.KRef(input.Namespace, input.ClusterName)) Eventually(func() error { - return input.ClusterProxy.Apply(ctx, workloadClusterTemplate, input.Args...) + return input.ClusterProxy.Apply(ctx, input.CustomTemplateYAML, input.Args...) }, 10*time.Second).Should(Succeed(), "Failed to apply the cluster template") // Once we applied the cluster template we can run PreWaitForCluster. // Note: This can e.g. be used to verify the BeforeClusterCreate lifecycle hook is executed // and blocking correctly. if input.PreWaitForCluster != nil { - log.Logf("Calling PreWaitForCluster") + log.Logf("Calling PreWaitForCluster for cluster %s", klog.KRef(input.Namespace, input.ClusterName)) input.PreWaitForCluster() } - log.Logf("Waiting for the cluster infrastructure to be provisioned") + log.Logf("Waiting for the cluster infrastructure of cluster %s to be provisioned", klog.KRef(input.Namespace, input.ClusterName)) result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ Getter: input.ClusterProxy.GetClient(), - Namespace: input.ConfigCluster.Namespace, - Name: input.ConfigCluster.ClusterName, + Namespace: input.Namespace, + Name: input.ClusterName, }, input.WaitForClusterIntervals...)
if result.Cluster.Spec.Topology != nil { result.ClusterClass = framework.GetClusterClassByName(ctx, framework.GetClusterClassByNameInput{ Getter: input.ClusterProxy.GetClient(), - Namespace: input.ConfigCluster.Namespace, + Namespace: input.Namespace, Name: result.Cluster.Spec.Topology.Class, }) } - log.Logf("Waiting for control plane to be initialized") + log.Logf("Waiting for control plane of cluster %s to be initialized", klog.KRef(input.Namespace, input.ClusterName)) input.WaitForControlPlaneInitialized(ctx, input, result) if input.CNIManifestPath != "" { - log.Logf("Installing a CNI plugin to the workload cluster") + log.Logf("Installing a CNI plugin to the workload cluster %s", klog.KRef(input.Namespace, input.ClusterName)) workloadCluster := input.ClusterProxy.GetWorkloadCluster(ctx, result.Cluster.Namespace, result.Cluster.Name) cniYaml, err := os.ReadFile(input.CNIManifestPath) @@ -350,16 +411,16 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate Expect(workloadCluster.Apply(ctx, cniYaml)).ShouldNot(HaveOccurred()) } - log.Logf("Waiting for control plane to be ready") + log.Logf("Waiting for control plane of cluster %s to be ready", klog.KRef(input.Namespace, input.ClusterName)) input.WaitForControlPlaneMachinesReady(ctx, input, result) - log.Logf("Waiting for the machine deployments to be provisioned") + log.Logf("Waiting for the machine deployments of cluster %s to be provisioned", klog.KRef(input.Namespace, input.ClusterName)) result.MachineDeployments = framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{ Lister: input.ClusterProxy.GetClient(), Cluster: result.Cluster, }, input.WaitForMachineDeployments...) - log.Logf("Waiting for the machine pools to be provisioned") + log.Logf("Waiting for the machine pools of cluster %s to be provisioned", klog.KRef(input.Namespace, input.ClusterName)) result.MachinePools = framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{ Getter: input.ClusterProxy.GetClient(), Lister: input.ClusterProxy.GetClient(), @@ -367,16 +428,16 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate }, input.WaitForMachinePools...) if input.PostMachinesProvisioned != nil { - log.Logf("Calling PostMachinesProvisioned") + log.Logf("Calling PostMachinesProvisioned for cluster %s", klog.KRef(input.Namespace, input.ClusterName)) input.PostMachinesProvisioned() } } -// setDefaults sets the default values for ApplyClusterTemplateAndWaitInput if not set. +// setDefaults sets the default values for ApplyCustomClusterTemplateAndWaitInput if not set. // Currently, we set the default ControlPlaneWaiters here, which are implemented for KubeadmControlPlane. 
-func setDefaults(input *ApplyClusterTemplateAndWaitInput) { +func setDefaults(input *ApplyCustomClusterTemplateAndWaitInput) { if input.WaitForControlPlaneInitialized == nil { - input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) { + input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) { result.ControlPlane = framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{ Lister: input.ClusterProxy.GetClient(), Cluster: result.Cluster, @@ -385,7 +446,7 @@ func setDefaults(input *ApplyClusterTemplateAndWaitInput) { } if input.WaitForControlPlaneMachinesReady == nil { - input.WaitForControlPlaneMachinesReady = func(ctx context.Context, input ApplyClusterTemplateAndWaitInput, result *ApplyClusterTemplateAndWaitResult) { + input.WaitForControlPlaneMachinesReady = func(ctx context.Context, input ApplyCustomClusterTemplateAndWaitInput, result *ApplyCustomClusterTemplateAndWaitResult) { framework.WaitForControlPlaneAndMachinesReady(ctx, framework.WaitForControlPlaneAndMachinesReadyInput{ GetLister: input.ClusterProxy.GetClient(), Cluster: result.Cluster,
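Note (illustrative, not part of the patch): a minimal sketch of how a spec could drive the new ApplyCustomClusterTemplateAndWait entry point directly with YAML it has already rendered, which is the flow the scale spec runs per worker after swapping the cluster name into the base template. The helper name, spec name, and interval keys below are placeholders; only the input/result types and function signatures come from the change above.
package e2e

import (
	"context"

	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
)

// applyPrerenderedTemplate is a hypothetical helper: it applies a pre-rendered cluster template and
// waits for the cluster, control plane, and machine deployments using the intervals from the e2e config.
func applyPrerenderedTemplate(ctx context.Context, proxy framework.ClusterProxy, e2eConfig *clusterctl.E2EConfig, namespace, clusterName string, templateYAML []byte) *clusterctl.ApplyCustomClusterTemplateAndWaitResult {
	result := &clusterctl.ApplyCustomClusterTemplateAndWaitResult{}
	clusterctl.ApplyCustomClusterTemplateAndWait(ctx, clusterctl.ApplyCustomClusterTemplateAndWaitInput{
		ClusterProxy:                 proxy,
		CustomTemplateYAML:           templateYAML, // already rendered, e.g. by clusterctl.ConfigCluster
		ClusterName:                  clusterName,
		Namespace:                    namespace,
		WaitForClusterIntervals:      e2eConfig.GetIntervals("scale", "wait-cluster"),
		WaitForControlPlaneIntervals: e2eConfig.GetIntervals("scale", "wait-control-plane"),
		WaitForMachineDeployments:    e2eConfig.GetIntervals("scale", "wait-worker-nodes"),
	}, result)
	// The default ControlPlaneWaiters set by setDefaults assume a KubeadmControlPlane.
	return result
}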