[release-1.6] 🌱 Use kind as a secondary management cluster for clusterctl E2E tests #10676

Merged
157 changes: 103 additions & 54 deletions test/e2e/clusterctl_upgrade.go
@@ -67,6 +67,12 @@ type ClusterctlUpgradeSpecInput struct {
ClusterctlConfigPath string
BootstrapClusterProxy framework.ClusterProxy
ArtifactFolder string

// UseKindForManagementCluster instructs the test to use kind for creating the management cluster (instead of using the actual infrastructure provider).
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
// So we create a new management cluster in which to install the older versions of providers.
UseKindForManagementCluster bool

// InitWithBinary can be used to override the INIT_WITH_BINARY e2e config variable with the URL of the clusterctl binary of the old version of Cluster API. The spec will interpolate the
// strings `{OS}` and `{ARCH}` to `runtime.GOOS` and `runtime.GOARCH` respectively, e.g. https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-{OS}-{ARCH}
InitWithBinary string
@@ -192,16 +198,22 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
managementClusterNamespace *corev1.Namespace
managementClusterCancelWatches context.CancelFunc
managementClusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
managementClusterProvider bootstrap.ClusterProvider
managementClusterProxy framework.ClusterProxy

initClusterctlBinaryURL string
initContract string
initKubernetesVersion string

workloadClusterName string

scheme *apiruntime.Scheme
)

BeforeEach(func() {
scheme = apiruntime.NewScheme()
framework.TryAddDefaultSchemes(scheme)

Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
input = inputGetter()
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
@@ -247,73 +259,104 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)

// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
// If the test is not being run in a separate kind cluster, set up a Namespace in the current bootstrap cluster to host objects for this spec and create a watcher for the namespace events.
if !input.UseKindForManagementCluster {
managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
}
managementClusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})

It("Should create a management cluster and then upgrade all the providers", func() {
By("Creating a workload cluster to be used as a new management cluster")
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
// So we are creating a workload cluster that will be used as a new management cluster where to install older version of providers
infrastructureProvider := clusterctl.DefaultInfrastructureProvider
if input.InfrastructureProvider != nil {
infrastructureProvider = *input.InfrastructureProvider
}
managementClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: infrastructureProvider,
Flavor: input.MgmtFlavor,
Namespace: managementClusterNamespace.Name,
ClusterName: managementClusterName,
KubernetesVersion: initKubernetesVersion,
ControlPlaneMachineCount: pointer.Int64(1),
WorkerMachineCount: pointer.Int64(1),
},
PreWaitForCluster: func() {
if input.PreWaitForCluster != nil {
input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
}
},
CNIManifestPath: input.CNIManifestPath,
ControlPlaneWaiters: input.ControlPlaneWaiters,
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, managementClusterResources)

By("Turning the workload cluster into a management cluster with older versions of providers")

// If the cluster is a DockerCluster, we should load controller images into the nodes.
// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
// this approach because this allows to have a single source of truth for images, the e2e config
// Nb. the images for official version of the providers will be pulled from internet, but the latest images must be
// built locally and loaded into kind
cluster := managementClusterResources.Cluster
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
Name: cluster.Name,
Images: input.E2EConfig.Images,
})).To(Succeed())
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
// So we create a workload cluster that will be used as a new management cluster in which to install the older versions of providers.
managementClusterName = fmt.Sprintf("%s-management-%s", specName, util.RandomString(6))
managementClusterLogFolder := filepath.Join(input.ArtifactFolder, "clusters", managementClusterName)
if input.UseKindForManagementCluster {
By("Creating a kind cluster to be used as a new management cluster")

managementClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
Name: managementClusterName,
KubernetesVersion: initKubernetesVersion,
RequiresDockerSock: input.E2EConfig.HasDockerProvider(),
// Note: most of these images won't be used while starting the controllers, because this cluster is used to spin up older versions of CAPI. Those images will eventually be used when upgrading to current.
Images: input.E2EConfig.Images,
IPFamily: input.E2EConfig.GetVariable(IPFamily),
LogFolder: filepath.Join(managementClusterLogFolder, "logs-kind"),
})
Expect(managementClusterProvider).ToNot(BeNil(), "Failed to create a kind cluster")

kubeconfigPath := managementClusterProvider.GetKubeconfigPath()
Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the kind cluster")

managementClusterProxy = framework.NewClusterProxy(managementClusterName, kubeconfigPath, scheme)
Expect(managementClusterProxy).ToNot(BeNil(), "Failed to get a kind cluster proxy")

managementClusterResources.Cluster = &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: managementClusterName,
},
}
} else {
By("Creating a workload cluster to be used as a new management cluster")

clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: infrastructureProvider,
Flavor: input.MgmtFlavor,
Namespace: managementClusterNamespace.Name,
ClusterName: managementClusterName,
KubernetesVersion: initKubernetesVersion,
ControlPlaneMachineCount: pointer.Int64(1),
WorkerMachineCount: pointer.Int64(1),
},
PreWaitForCluster: func() {
if input.PreWaitForCluster != nil {
input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
}
},
CNIManifestPath: input.CNIManifestPath,
ControlPlaneWaiters: input.ControlPlaneWaiters,
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, managementClusterResources)

// If the cluster is a DockerCluster, we should load controller images into the nodes.
// Nb. this can also be achieved by changing the DockerMachine spec, but for the time being we are using
// this approach because it allows us to keep a single source of truth for images: the e2e config.
// Nb. the images for official versions of the providers will be pulled from the internet, but the latest images must be
// built locally and loaded into kind.
cluster := managementClusterResources.Cluster
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
Name: cluster.Name,
Images: input.E2EConfig.Images,
})).To(Succeed())
}

// Get a ClusterProxy so we can interact with the workload cluster
managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
}

// Get a ClusterProxy so we can interact with the workload cluster
managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
By("Turning the new cluster into a management cluster with older versions of providers")

// Download the clusterctl version that should be used to initially set up the management cluster (which is later upgraded).
Byf("Downloading clusterctl binary from %s", initClusterctlBinaryURL)
clusterctlBinaryPath, clusterctlConfigPath := setupClusterctl(ctx, initClusterctlBinaryURL, input.ClusterctlConfigPath)
defer os.Remove(clusterctlBinaryPath) // clean up

By("Initializing the workload cluster with older versions of providers")
By("Initializing the new management cluster with older versions of providers")

if input.PreInit != nil {
By("Running Pre-init steps against the management cluster")
By("Running Pre-init steps against the new management cluster")
input.PreInit(managementClusterProxy)
}

@@ -356,7 +399,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
IPAMProviders: ipamProviders,
RuntimeExtensionProviders: runtimeExtensionProviders,
AddonProviders: addonProviders,
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
LogFolder: managementClusterLogFolder,
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)

By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
@@ -375,7 +418,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
// In this case ApplyClusterTemplateAndWait can't be used because this helper is linked to the latest version of the API;
// so we are getting a template using the downloaded version of clusterctl, applying it, and waiting for machines to be provisioned.

workloadClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
workloadClusterName = fmt.Sprintf("%s-workload-%s", specName, util.RandomString(6))
workloadClusterNamespace := testNamespace.Name
kubernetesVersion := input.WorkloadKubernetesVersion
if kubernetesVersion == "" {
@@ -543,7 +586,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
IPAMProviders: upgrade.IPAMProviders,
RuntimeExtensionProviders: upgrade.RuntimeExtensionProviders,
AddonProviders: upgrade.AddonProviders,
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
LogFolder: managementClusterLogFolder,
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
} else {
Byf("[%d] Upgrading providers to the latest version available", i)
Expand All @@ -553,7 +596,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
ClusterctlVariables: input.UpgradeClusterctlVariables,
ClusterProxy: managementClusterProxy,
Contract: upgrade.Contract,
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
LogFolder: managementClusterLogFolder,
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
}

@@ -686,8 +729,14 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
By("Running PreCleanupManagementCluster steps against the management cluster")
input.PreCleanupManagementCluster(managementClusterProxy)
}

// Dumps all the resources in the spec namespace, then cleans up the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
if input.UseKindForManagementCluster {
managementClusterProxy.Dispose(ctx)
managementClusterProvider.Dispose(ctx)
} else {
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
}
})
}

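In short, the new UseKindForManagementCluster flag toggles between two paths: a throwaway kind cluster created directly by the test, or the previous behaviour of provisioning a workload cluster through the infrastructure provider and promoting it. The following condensed sketch is illustrative only, assembled from the hunks above rather than copied verbatim; assertions, the else branch, and most parameters are omitted.

```go
// Condensed, illustrative sketch of the kind-backed path added above (not a verbatim excerpt).
if input.UseKindForManagementCluster {
	// Spin up a throwaway kind cluster and pre-load the e2e images into it.
	managementClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
		Name:              managementClusterName,
		KubernetesVersion: initKubernetesVersion,
		Images:            input.E2EConfig.Images,
		LogFolder:         filepath.Join(managementClusterLogFolder, "logs-kind"),
	})

	// Talk to the new cluster through a proxy built from the kind kubeconfig;
	// no CAPI Cluster object backs this management cluster.
	managementClusterProxy = framework.NewClusterProxy(managementClusterName, managementClusterProvider.GetKubeconfigPath(), scheme)
}

// ...

// During cleanup the kind-backed cluster is simply disposed, instead of being
// dumped and deleted through the bootstrap cluster as in the workload-cluster path.
if input.UseKindForManagementCluster {
	managementClusterProxy.Dispose(ctx)
	managementClusterProvider.Dispose(ctx)
}
```

Because the kind-backed management cluster has no CAPI Cluster object behind it, the spec records a stub clusterv1.Cluster carrying only the name and skips the usual dump-and-cleanup flow on teardown.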
59 changes: 33 additions & 26 deletions test/e2e/clusterctl_upgrade_test.go
@@ -84,8 +84,9 @@ var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func(
UpgradeClusterctlVariables: map[string]string{
"CLUSTER_TOPOLOGY": "false",
},
MgmtFlavor: "topology",
WorkloadFlavor: "",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -134,10 +135,11 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.5=>current)", func(
},
// NOTE: If this version is changed here, the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v0.4/bases`.
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -163,10 +165,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() {
// try to deploy the latest version of our test-extension from docker.yaml.
InitWithRuntimeExtensionProviders: []string{},
// NOTE: If this version is changed here, the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.0/bases`.
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -189,10 +192,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here, the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases`.
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -215,10 +219,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>cur
InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here, the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases`.
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
InitWithKubernetesVersion: "v1.27.3",
WorkloadKubernetesVersion: "v1.27.3",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
UseKindForManagementCluster: true,
}
})
})
@@ -235,10 +240,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() {
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here, the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases`.
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -255,10 +261,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.5=>cur
InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
InitWithProvidersContract: "v1beta1",
// NOTE: If this version is changed here, the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.5/bases`.
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
UseKindForManagementCluster: true,
}
})
})
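Downstream provider repositories that reuse this spec can opt into the same behaviour by setting the new field in their own spec input. The snippet below is a hypothetical consumer sketch, not part of this PR: the suite variables (ctx, e2eConfig, clusterctlConfigPath, bootstrapClusterProxy, artifactFolder), the Describe text, and the chosen versions are placeholders following the pattern of the tests above.

```go
// Hypothetical downstream usage; suite variables, versions, and names are placeholders.
var _ = Describe("When testing clusterctl upgrades on a provider [sample]", func() {
	ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
		return ClusterctlUpgradeSpecInput{
			E2EConfig:                 e2eConfig,
			ClusterctlConfigPath:      clusterctlConfigPath,
			BootstrapClusterProxy:     bootstrapClusterProxy,
			ArtifactFolder:            artifactFolder,
			InitWithBinary:            "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/clusterctl-{OS}-{ARCH}",
			InitWithProvidersContract: "v1beta1",
			InitWithKubernetesVersion: "v1.28.0",
			WorkloadKubernetesVersion: "v1.28.0",
			// Run the temporary management cluster on kind instead of the
			// provider's real infrastructure.
			UseKindForManagementCluster: true,
		}
	})
})
```

Leaving the field at its zero value (false) preserves the previous behaviour of provisioning the management cluster through the infrastructure provider.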