From 6c81d4a31c08728f8d5d557a17a93b3c2764632a Mon Sep 17 00:00:00 2001 From: Seth Pellegrino Date: Fri, 6 Mar 2020 16:02:32 -0800 Subject: [PATCH] test: include e2e test for KCP adoption --- test/framework/control_plane.go | 71 +----- test/framework/convenience.go | 11 +- test/framework/machines.go | 118 ++++++++++ test/framework/management_cluster.go | 2 +- .../docker/e2e/custom_assertions.go | 37 +++ .../docker/e2e/docker_suite_test.go | 4 + test/infrastructure/docker/e2e/docker_test.go | 211 ++++++++++++++++++ 7 files changed, 382 insertions(+), 72 deletions(-) create mode 100644 test/framework/machines.go diff --git a/test/framework/control_plane.go b/test/framework/control_plane.go index c9fcb11beec5..f1779e7b3bba 100644 --- a/test/framework/control_plane.go +++ b/test/framework/control_plane.go @@ -22,10 +22,8 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" @@ -128,67 +126,6 @@ func CreateKubeadmControlPlane(ctx context.Context, input CreateKubeadmControlPl }, intervals...).Should(Succeed()) } -// CreateMachineDeploymentInput is the input for CreateMachineDeployment. -type CreateMachineDeploymentInput struct { - Creator Creator - MachineDeployment *clusterv1.MachineDeployment - BootstrapConfigTemplate runtime.Object - InfraMachineTemplate runtime.Object -} - -// CreateMachineDeployment creates the machine deployment and dependencies. 
-func CreateMachineDeployment(ctx context.Context, input CreateMachineDeploymentInput) { - By("creating a core MachineDeployment resource") - Expect(input.Creator.Create(ctx, input.MachineDeployment)).To(Succeed()) - - By("creating a BootstrapConfigTemplate resource") - Expect(input.Creator.Create(ctx, input.BootstrapConfigTemplate)).To(Succeed()) - - By("creating an InfrastructureMachineTemplate resource") - Expect(input.Creator.Create(ctx, input.InfraMachineTemplate)).To(Succeed()) -} - -// WaitForMachineDeploymentNodesToExistInput is the input for WaitForMachineDeploymentNodesToExist. -type WaitForMachineDeploymentNodesToExistInput struct { - Lister Lister - Cluster *clusterv1.Cluster - MachineDeployment *clusterv1.MachineDeployment -} - -// WaitForMachineDeploymentNodesToExist waits until all nodes associated with a machine deployment exist. -func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMachineDeploymentNodesToExistInput, intervals ...interface{}) { - By("waiting for the workload nodes to exist") - Eventually(func() (int, error) { - selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector) - if err != nil { - return 0, err - } - ms := &clusterv1.MachineSetList{} - if err := input.Lister.List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil { - return 0, err - } - if len(ms.Items) == 0 { - return 0, errors.New("no machinesets were found") - } - machineSet := ms.Items[0] - selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) - if err != nil { - return 0, err - } - machines := &clusterv1.MachineList{} - if err := input.Lister.List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil { - return 0, err - } - count := 0 - for _, machine := range machines.Items { - if machine.Status.NodeRef != nil { - count++ - } - } - return count, nil - }, 
intervals...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas))) -} - // WaitForClusterToProvisionInput is the input for WaitForClusterToProvision. type WaitForClusterToProvisionInput struct { Getter Getter @@ -223,14 +160,16 @@ func WaitForKubeadmControlPlaneMachinesToExist(ctx context.Context, input WaitFo By("waiting for all control plane nodes to exist") inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace) // ControlPlane labels + matchControlPlaneListOption := client.HasLabels{ + clusterv1.MachineControlPlaneLabelName, + } matchClusterListOption := client.MatchingLabels{ - clusterv1.MachineControlPlaneLabelName: "", - clusterv1.ClusterLabelName: input.Cluster.Name, + clusterv1.ClusterLabelName: input.Cluster.Name, } Eventually(func() (int, error) { machineList := &clusterv1.MachineList{} - if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil { + if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchControlPlaneListOption, matchClusterListOption); err != nil { fmt.Println(err) return 0, err } diff --git a/test/framework/convenience.go b/test/framework/convenience.go index 479da5ae9f44..e2d6d77aa994 100644 --- a/test/framework/convenience.go +++ b/test/framework/convenience.go @@ -53,12 +53,13 @@ func WaitForAPIServiceAvailable(ctx context.Context, mgmt Waiter, serviceName st Expect(err).NotTo(HaveOccurred(), "stack: %+v", err) } -// WaitForPodsReadyInNamespace will wait for all pods to be Ready in the +// WaitForDeploymentsInNamespace will wait for all deployments to be Available in the // specified namespace. 
-// For example, kubectl wait --for=condition=Ready --timeout=300s --namespace capi-system pods --all -func WaitForPodsReadyInNamespace(ctx context.Context, cluster Waiter, namespace string) { - By(fmt.Sprintf("waiting for pods to be ready in namespace %q", namespace)) - err := cluster.Wait(ctx, "--for", "condition=Ready", "--timeout", "300s", "--namespace", namespace, "pods", "--all") +// For example, kubectl wait --for=condition=Available --timeout=300s --namespace capi-system deployments --all +func WaitForDeploymentsInNamespace(ctx context.Context, cluster Waiter, namespace string) { + By(fmt.Sprintf("waiting for deployments to be available in namespace %q", namespace)) + + err := cluster.Wait(ctx, "--for", "condition=Available", "--timeout", "300s", "--namespace", namespace, "deployments", "--all") Expect(err).NotTo(HaveOccurred(), "stack: %+v", err) } diff --git a/test/framework/machines.go b/test/framework/machines.go new file mode 100644 index 000000000000..e5282945fe2e --- /dev/null +++ b/test/framework/machines.go @@ -0,0 +1,118 @@ +package framework + +import ( + "context" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CreateMachineInput is the input for CreateMachine. +type CreateMachineInput struct { + Creator Creator + Machine *clusterv1.Machine + BootstrapConfig runtime.Object + InfraMachine runtime.Object +} + +// CreateMachine creates the machine and dependencies. 
+func CreateMachine(ctx context.Context, input CreateMachineInput) {
+	By("creating a core Machine resource")
+	Expect(input.Creator.Create(ctx, input.Machine)).To(Succeed())
+
+	By("creating a BootstrapConfigTemplate resource")
+	Expect(input.Creator.Create(ctx, input.BootstrapConfig)).To(Succeed())
+
+	By("creating an InfrastructureMachineTemplate resource")
+	Expect(input.Creator.Create(ctx, input.InfraMachine)).To(Succeed())
+}
+
+// WaitForMachineNodesToExistInput is the input for WaitForMachineNodesToExist.
+type WaitForMachineNodesToExistInput struct {
+	Getter   Getter
+	Machines []*clusterv1.Machine
+}
+
+// WaitForMachineNodesToExist waits until all nodes associated with the given machines exist.
+func WaitForMachineNodesToExist(ctx context.Context, input WaitForMachineNodesToExistInput, intervals ...interface{}) {
+	By("waiting for the machines' nodes to exist")
+	Eventually(func() (count int, err error) {
+		for _, m := range input.Machines {
+			machine := &clusterv1.Machine{}
+			err = input.Getter.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, machine)
+			if err != nil {
+				return
+			}
+			if machine.Status.NodeRef != nil {
+				count++
+			}
+		}
+		return
+	}, intervals...).Should(Equal(len(input.Machines)))
+}
+
+// CreateMachineDeploymentInput is the input for CreateMachineDeployment.
+type CreateMachineDeploymentInput struct {
+	Creator                 Creator
+	MachineDeployment       *clusterv1.MachineDeployment
+	BootstrapConfigTemplate runtime.Object
+	InfraMachineTemplate    runtime.Object
+}
+
+// CreateMachineDeployment creates the machine deployment and dependencies.
+func CreateMachineDeployment(ctx context.Context, input CreateMachineDeploymentInput) { + By("creating a core MachineDeployment resource") + Expect(input.Creator.Create(ctx, input.MachineDeployment)).To(Succeed()) + + By("creating a BootstrapConfigTemplate resource") + Expect(input.Creator.Create(ctx, input.BootstrapConfigTemplate)).To(Succeed()) + + By("creating an InfrastructureMachineTemplate resource") + Expect(input.Creator.Create(ctx, input.InfraMachineTemplate)).To(Succeed()) +} + +// WaitForMachineDeploymentNodesToExistInput is the input for WaitForMachineDeploymentNodesToExist. +type WaitForMachineDeploymentNodesToExistInput struct { + Lister Lister + Cluster *clusterv1.Cluster + MachineDeployment *clusterv1.MachineDeployment +} + +// WaitForMachineDeploymentNodesToExist waits until all nodes associated with a machine deployment exist. +func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMachineDeploymentNodesToExistInput, intervals ...interface{}) { + By("waiting for the workload nodes to exist") + Eventually(func() (int, error) { + selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector) + if err != nil { + return 0, err + } + ms := &clusterv1.MachineSetList{} + if err := input.Lister.List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap)); err != nil { + return 0, err + } + if len(ms.Items) == 0 { + return 0, errors.New("no machinesets were found") + } + machineSet := ms.Items[0] + selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) + if err != nil { + return 0, err + } + machines := &clusterv1.MachineList{} + if err := input.Lister.List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap)); err != nil { + return 0, err + } + count := 0 + for _, machine := range machines.Items { + if machine.Status.NodeRef != nil { + count++ + } + } + return count, nil + }, 
intervals...).Should(Equal(int(*input.MachineDeployment.Spec.Replicas))) +} diff --git a/test/framework/management_cluster.go b/test/framework/management_cluster.go index 754161de5fe8..fd93063ee667 100644 --- a/test/framework/management_cluster.go +++ b/test/framework/management_cluster.go @@ -115,7 +115,7 @@ func InitManagementCluster(ctx context.Context, input *InitManagementClusterInpu for _, waiter := range component.Waiters { switch waiter.Type { case PodsWaiter: - WaitForPodsReadyInNamespace(ctx, managementCluster, waiter.Value) + WaitForDeploymentsInNamespace(ctx, managementCluster, waiter.Value) case ServiceWaiter: WaitForAPIServiceAvailable(ctx, managementCluster, waiter.Value) } diff --git a/test/infrastructure/docker/e2e/custom_assertions.go b/test/infrastructure/docker/e2e/custom_assertions.go index da460f2dede0..f7d9b243d0e6 100644 --- a/test/infrastructure/docker/e2e/custom_assertions.go +++ b/test/infrastructure/docker/e2e/custom_assertions.go @@ -24,6 +24,9 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega"
+	types "github.com/onsi/gomega/types"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
@@ -64,3 +67,37 @@ func ensureDockerArtifactsDeleted(input ensureDockerArtifactsDeletedInput) {
 	Expect(dmtl.Items).To(HaveLen(0))
 	By("Succeeding in deleting all docker artifacts")
 }
+
+type controllerMatch struct {
+	kind  string
+	owner metav1.Object
+}
+
+func (m *controllerMatch) Match(actual interface{}) (success bool, err error) {
+	actualMeta, err := meta.Accessor(actual)
+	if err != nil {
+		return false, fmt.Errorf("unable to read meta for %T: %v", actual, err)
+	}
+
+	owner := metav1.GetControllerOf(actualMeta)
+	if owner == nil {
+		return false, fmt.Errorf("no controller found (owner ref with controller = true) for object %#v", actual)
+	}
+
+	match := (owner.Kind == m.kind &&
+		owner.Name == m.owner.GetName() && owner.UID == m.owner.GetUID())
+
+	return match, nil
+}
+
+func (m *controllerMatch) FailureMessage(actual interface{}) string {
+	return fmt.Sprintf("Expected\n\t%#v to have a controller reference pointing to %s/%s (%v)", actual, m.kind, m.owner.GetName(), m.owner.GetUID())
+}
+
+func (m *controllerMatch) NegatedFailureMessage(actual interface{}) string {
+	return fmt.Sprintf("Expected\n\t%#v to not have a controller reference pointing to %s/%s (%v)", actual, m.kind, m.owner.GetName(), m.owner.GetUID())
+}
+
+func HaveControllerRef(kind string, owner metav1.Object) types.GomegaMatcher {
+	return &controllerMatch{kind, owner}
+}
diff --git a/test/infrastructure/docker/e2e/docker_suite_test.go b/test/infrastructure/docker/e2e/docker_suite_test.go
index cca9d003699b..a324a6da9466 100644
--- a/test/infrastructure/docker/e2e/docker_suite_test.go
+++ b/test/infrastructure/docker/e2e/docker_suite_test.go
@@ -131,6 +131,10 @@ var _ = AfterSuite(func() {
 })
 
 func writeLogs(mgmt *CAPDCluster, namespace, deploymentName, logDir string)
error { + if mgmt == nil { + return nil + } + c, err := mgmt.GetClient() if err != nil { return err diff --git a/test/infrastructure/docker/e2e/docker_test.go b/test/infrastructure/docker/e2e/docker_test.go index 0166da6ab3e3..0efb90fc5409 100644 --- a/test/infrastructure/docker/e2e/docker_test.go +++ b/test/infrastructure/docker/e2e/docker_test.go @@ -21,6 +21,7 @@ package e2e import ( "errors" "fmt" + "strings" "time" . "github.com/onsi/ginkgo" @@ -55,6 +56,7 @@ var _ = Describe("Docker", func() { }) AfterEach(func() { + By("cleaning up the test cluster") deleteClusterInput := framework.DeleteClusterInput{ Deleter: client, Cluster: cluster, @@ -207,6 +209,155 @@ var _ = Describe("Docker", func() { }, "10m", "30s").Should(Equal(int(*controlPlane.Spec.Replicas))) }) }) + + Describe("Controlplane Adoption", func() { + Specify("KubeadmControlPlane adopts up-to-date control plane Machines without modification", func() { + var ( + controlPlane *controlplanev1.KubeadmControlPlane + infraCluster *infrav1.DockerCluster + template *infrav1.DockerMachineTemplate + err error + ) + replicas := 1 /* TODO: can't seem to get CAPD to bootstrap a cluster with more than one control plane machine */ + cluster, infraCluster, controlPlane, template = clusterGen.GenerateCluster(namespace, int32(replicas)) + controlPlaneRef := cluster.Spec.ControlPlaneRef + cluster.Spec.ControlPlaneRef = nil + + // Set up the client to the management cluster + client, err = mgmt.GetClient() + Expect(err).NotTo(HaveOccurred()) + + // Set up the cluster object + createClusterInput := framework.CreateClusterInput{ + Creator: client, + Cluster: cluster, + InfraCluster: infraCluster, + } + framework.CreateCluster(ctx, createClusterInput) + + version := "1.16.3" + + // Wait for the cluster to provision. 
+ assertClusterProvisionsInput := framework.WaitForClusterToProvisionInput{ + Getter: client, + Cluster: cluster, + } + framework.WaitForClusterToProvision(ctx, assertClusterProvisionsInput) + + initMachines, bootstrap, infra := generateControlPlaneMachines(cluster, namespace, version, replicas) + for i := 0; i < len(initMachines); i++ { + // we have to go one at a time, otherwise weird things start to happen + By("initializing control plane machines") + createMachineInput := framework.CreateMachineInput{ + Creator: client, + BootstrapConfig: bootstrap[i], + InfraMachine: infra[i], + Machine: initMachines[i], + } + framework.CreateMachine(ctx, createMachineInput) + + // Wait for the first control plane machine to boot + assertMachinesProvisionInput := framework.WaitForMachineNodesToExistInput{ + Getter: client, + Machines: initMachines[i : i+1], + } + framework.WaitForMachineNodesToExist(ctx, assertMachinesProvisionInput) + } + + // Set up the KubeadmControlPlane + createKubeadmControlPlaneInput := framework.CreateKubeadmControlPlaneInput{ + Creator: client, + ControlPlane: controlPlane, + MachineTemplate: template, + } + framework.CreateKubeadmControlPlane(ctx, createKubeadmControlPlaneInput) + + // We have to set the control plane ref on the cluster as well + cl := &clusterv1.Cluster{} + client.Get(ctx, ctrlclient.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Name}, cl) + cl.Spec.ControlPlaneRef = controlPlaneRef + Expect(client.Update(ctx, cl)).To(Succeed()) + + // Wait for the control plane to be ready + waitForControlPlaneToBeReadyInput := framework.WaitForControlPlaneToBeReadyInput{ + Getter: client, + ControlPlane: controlPlane, + } + framework.WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput) + + // Wait for the controlplane nodes to exist + assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{ + Lister: client, + Cluster: cluster, + ControlPlane: controlPlane, + } + 
framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput, "10m", "10s")
+
+			machines := clusterv1.MachineList{}
+			Expect(client.List(ctx, &machines,
+				ctrlclient.InNamespace(namespace),
+				ctrlclient.HasLabels{
+					clusterv1.MachineControlPlaneLabelName,
+				})).To(Succeed())
+
+			By("taking stable ownership of the Machines")
+			for _, m := range machines.Items {
+				Expect(&m).To(HaveControllerRef(framework.TypeToKind(controlPlane), controlPlane))
+				Expect(m.CreationTimestamp.Time).To(BeTemporally("<", controlPlane.CreationTimestamp.Time))
+			}
+			Expect(machines.Items).To(HaveLen(1))
+
+			By("taking ownership of the cluster's PKI material")
+			secrets := corev1.SecretList{}
+			Expect(client.List(ctx, &secrets, ctrlclient.InNamespace(namespace), ctrlclient.MatchingLabels{
+				clusterv1.ClusterLabelName: cluster.Name,
+			})).To(Succeed())
+
+			for _, s := range secrets.Items {
+				// We don't check the data, and removing it from the object makes assertions much easier to read
+				s.Data = nil
+
+				// The bootstrap secret should still be owned by the bootstrap config so it's cleaned up properly,
+				// but the cluster PKI materials should have their ownership transferred.
+ switch { + case strings.HasSuffix(s.Name, "-kubeconfig"): + // Do nothing + case strings.HasPrefix(s.Name, "bootstrap-"): + fi := -1 + for i, b := range bootstrap { + if s.Name == b.Name { + fi = i + } + } + Expect(fi).To(BeNumerically(">=", 0), "could not find matching bootstrap object for Secret %s", s.Name) + Expect(&s).To(HaveControllerRef(framework.TypeToKind(bootstrap[fi]), bootstrap[fi])) + default: + Expect(&s).To(HaveControllerRef(framework.TypeToKind(controlPlane), controlPlane)) + } + } + Expect(secrets.Items).To(HaveLen(4 /* pki */ + 1 /* kubeconfig */ + int(replicas))) + + By("ensuring we can still join machines after the adoption") + md, infraTemplate, bootstrapTemplate := GenerateMachineDeployment(cluster, 1) + + // Create the workload nodes + createMachineDeploymentinput := framework.CreateMachineDeploymentInput{ + Creator: client, + MachineDeployment: md, + BootstrapConfigTemplate: bootstrapTemplate, + InfraMachineTemplate: infraTemplate, + } + framework.CreateMachineDeployment(ctx, createMachineDeploymentinput) + + // Wait for the workload nodes to exist + waitForMachineDeploymentNodesToExistInput := framework.WaitForMachineDeploymentNodesToExistInput{ + Lister: client, + Cluster: cluster, + MachineDeployment: md, + } + framework.WaitForMachineDeploymentNodesToExist(ctx, waitForMachineDeploymentNodesToExistInput) + }) + }) }) }) @@ -351,3 +502,63 @@ func (c *ClusterGenerator) GenerateCluster(namespace string, replicas int32) (*c } return cluster, infraCluster, kcp, template } + +func generateControlPlaneMachines(cluster *clusterv1.Cluster, namespace, version string, replicas int) ([]*clusterv1.Machine, []*bootstrapv1.KubeadmConfig, []*infrav1.DockerMachine) { + machines := make([]*clusterv1.Machine, 0, replicas) + bootstrap := make([]*bootstrapv1.KubeadmConfig, 0, replicas) + infra := make([]*infrav1.DockerMachine, 0, replicas) + for i := 0; i < replicas; i++ { + bootstrap = append(bootstrap, &bootstrapv1.KubeadmConfig{ + ObjectMeta: 
metav1.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("bootstrap-controlplane-%d", i), + }, + Spec: bootstrapv1.KubeadmConfigSpec{ + ClusterConfiguration: &v1beta1.ClusterConfiguration{ + APIServer: v1beta1.APIServer{ + // Darwin support + CertSANs: []string{"127.0.0.1"}, + }, + }, + }, + }) + + infra = append(infra, &infrav1.DockerMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("controlplane-%d-infra", i), + }, + Spec: infrav1.DockerMachineSpec{}, + }) + + machines = append(machines, &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("controlplane-%d", i), + Labels: map[string]string{ + clusterv1.MachineControlPlaneLabelName: "true", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.GetName(), + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: bootstrapv1.GroupVersion.String(), + Kind: framework.TypeToKind(bootstrap[i]), + Namespace: bootstrap[i].GetNamespace(), + Name: bootstrap[i].GetName(), + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: framework.TypeToKind(infra[i]), + Namespace: infra[i].GetNamespace(), + Name: infra[i].GetName(), + }, + Version: &version, + }, + }) + } + + return machines, bootstrap, infra +}