Add BeforeClusterDelete to runtimeSDK e2e tests
Signed-off-by: killianmuldoon <kmuldoon@vmware.com>
killianmuldoon committed Jul 1, 2022
1 parent 82d823b commit 962c4b0
Showing 3 changed files with 84 additions and 4 deletions.
57 changes: 53 additions & 4 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -125,7 +125,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})

It("Should create and upgrade a workload cluster", func() {
It("Should create, upgrade and delete a workload cluster", func() {
clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
By("Deploy Test Extension")
testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
@@ -239,7 +239,32 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
})
Expect(err).ToNot(HaveOccurred(), "Lifecycle hook calls were not as expected")

By("Deleting the workload cluster")
cluster := &clusterv1.Cluster{}
Eventually(func() error {
return input.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{Namespace: namespace.Name, Name: clusterName}, cluster)
}).Should(Succeed())

// Dump all the logs from the workload cluster before deleting it.
input.BootstrapClusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", cluster.Name))

Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name)

// Dump all Cluster API related resources to artifacts before deleting them.
framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
Lister: input.BootstrapClusterProxy.GetClient(),
Namespace: namespace.Name,
LogPath: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName(), "resources"),
})

beforeClusterDeleteHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))

framework.DeleteClusterAndWait(ctx, framework.DeleteClusterAndWaitInput{
Client: input.BootstrapClusterProxy.GetClient(),
Cluster: cluster,
}, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...)
By("PASSED!")

})

AfterEach(func() {
Expand Down Expand Up @@ -300,6 +325,7 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
"BeforeClusterCreate-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
"BeforeClusterUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
"AfterControlPlaneUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
"BeforeClusterDelete-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),

// Non-blocking hooks are set to Status:Success.
"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
@@ -347,7 +373,6 @@ func getLifecycleHookResponsesFromConfigMap(ctx context.Context, c client.Client
// beforeClusterCreateTestHandler calls runtimeHookTestHandler with a blocking function which returns false if
// the Cluster has entered ClusterPhaseProvisioned.
func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
log.Logf("Blocking with BeforeClusterCreate hook")
hookName := "BeforeClusterCreate"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
blocked := true
@@ -371,7 +396,6 @@ func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namesp
// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if the
// Cluster has controlplanev1.RollingUpdateInProgressReason in its ReadyCondition.
func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
log.Logf("Blocking with BeforeClusterUpgrade hook")
hookName := "BeforeClusterUpgrade"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
var blocked = true
@@ -397,7 +421,6 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, names
// afterControlPlaneUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if any
// MachineDeployment in the Cluster has upgraded to the target Kubernetes version.
func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
log.Logf("Blocking with AfterControlPlaneUpgrade hook")
hookName := "AfterControlPlaneUpgrade"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
var blocked = true
@@ -429,6 +452,30 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, n
}, intervals)
}

// beforeClusterDeleteHandler calls runtimeHookTestHandler with a blocking function which returns false if any of the
// MachineDeployments associated with the Cluster is in a deleting state.
func beforeClusterDeleteHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
hookName := "BeforeClusterDelete"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
var blocked = true
mds := &clusterv1.MachineDeploymentList{}
Eventually(func() error {
return c.List(ctx, mds, client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
clusterv1.ClusterTopologyOwnedLabel: "",
})
}).Should(Succeed())

// If any of the MachineDeployments has a deletion timestamp the delete process is unblocked.
for _, md := range mds.Items {
if md.ObjectMeta.DeletionTimestamp != nil {
blocked = false
}
}
return blocked
}, intervals)
}

// runtimeHookTestHandler runs a series of tests in sequence to check if the runtimeHook passed to it succeeds.
// 1) Checks that the hook has been called at least once and that the TopologyReconciled condition is a Failure.
// 2) Checks that the hook's blockingCondition is consistently true.
@@ -437,6 +484,8 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, n
// Note: runtimeHookTestHandler assumes that the hook passed to it is currently returning a blocking response.
// Updating the response to be non-blocking happens inline in the function.
func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, blockingCondition func() bool, intervals []interface{}) {
log.Logf("Blocking with %s hook", hookName)

// Check that the LifecycleHook has been called at least once and the TopologyReconciled condition is a Failure.
Eventually(func() error {
if err := checkLifecycleHooksCalledAtLeastOnce(ctx, c, namespace, clusterName, []string{hookName}); err != nil {
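The preloaded responses above are what make the hooks block: runtimeHookTestHandler keeps the operation held up while the ConfigMap entry for the hook still carries a Failure status, and (as its doc comment notes) unblocks it by rewriting that entry to a non-blocking response inline. A minimal sketch of that unblocking step, assuming the responses ConfigMap is keyed by "<hook>-preloadedResponse" and named after the cluster (the exact name comes from responsesConfigMap and is not shown in this diff):

// Sketch only: flip a preloaded hook response from blocking (Failure) to Success.
// The ConfigMap name is an assumption; the real test derives it in responsesConfigMap.
func patchHookResponseToSuccess(ctx context.Context, c client.Client, namespace, clusterName, hookName string) error {
	configMap := &corev1.ConfigMap{}
	name := clusterName + "-hookresponses" // hypothetical name
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, configMap); err != nil {
		return err
	}
	patch := client.MergeFrom(configMap.DeepCopy())
	configMap.Data[hookName+"-preloadedResponse"] = `{"Status": "Success"}`
	return c.Patch(ctx, configMap, patch)
}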
17 changes: 17 additions & 0 deletions test/extension/handlers/lifecycle/handlers.go
@@ -126,6 +126,23 @@ func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehoo
}
}

// DoBeforeClusterDelete implements the BeforeClusterDelete hook.
func (h *Handler) DoBeforeClusterDelete(ctx context.Context, request *runtimehooksv1.BeforeClusterDeleteRequest, response *runtimehooksv1.BeforeClusterDeleteResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterDelete is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterDelete, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterDelete, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
}

func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
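readResponseFromConfigMap and recordCallInConfigMap are only partially visible above. A rough sketch of what the read side plausibly looks like, assuming the per-cluster responses ConfigMap and the "<hook>-preloadedResponse" key layout used by the e2e test (the ConfigMap name here is a guess, not the actual implementation):

// Sketch only, not the implementation from this commit.
func readPreloadedResponse(ctx context.Context, c client.Client, clusterName, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
	hookName := runtimecatalog.HookName(hook)
	configMap := &corev1.ConfigMap{}
	name := clusterName + "-hookresponses" // hypothetical name
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, configMap); err != nil {
		return err
	}
	// The preloaded values are JSON payloads such as {"Status": "Failure", "Message": "..."}.
	return json.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response)
}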
14 changes: 14 additions & 0 deletions test/extension/main.go
@@ -155,6 +155,7 @@ func main() {
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.AfterControlPlaneInitialized,
Name: "after-control-plane-initialized",
@@ -165,6 +166,7 @@ func main() {
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.BeforeClusterUpgrade,
Name: "before-cluster-upgrade",
@@ -186,6 +188,7 @@ func main() {
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.AfterClusterUpgrade,
Name: "after-cluster-upgrade",
@@ -197,6 +200,17 @@ func main() {
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.BeforeClusterDelete,
Name: "before-cluster-delete",
HandlerFunc: lifecycleHandler.DoBeforeClusterDelete,
TimeoutSeconds: pointer.Int32(5),
FailurePolicy: toPtr(runtimehooksv1.FailurePolicyFail),
}); err != nil {
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

setupLog.Info("starting RuntimeExtension", "version", version.Get().String())
if err := webhookServer.Start(ctx); err != nil {
setupLog.Error(err, "error running webhook server")
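The new registration uses a toPtr helper for the failure policy; a plausible one-line version (the real helper lives elsewhere in test/extension/main.go and is not part of this diff):

// Sketch of a generic pointer helper; the actual toPtr is not shown in this diff.
func toPtr[T any](v T) *T {
	return &v
}

With FailurePolicyFail, a failure to reach the test extension surfaces as an error for the BeforeClusterDelete call instead of being silently skipped, which keeps the e2e assertions about the hook meaningful.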
