Add BeforeClusterDelete to runtimeSDK e2e tests
Signed-off-by: killianmuldoon <kmuldoon@vmware.com>
killianmuldoon committed Jul 4, 2022
1 parent 82d823b commit 9b384af
Showing 3 changed files with 94 additions and 12 deletions.
75 changes: 63 additions & 12 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -28,6 +28,7 @@ import (
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
@@ -125,7 +126,7 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})

It("Should create and upgrade a workload cluster", func() {
It("Should create, upgrade and delete a workload cluster", func() {
clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
By("Deploy Test Extension")
testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
@@ -228,11 +229,28 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
})

By("Deleting the workload cluster")
cluster := &clusterv1.Cluster{}
Eventually(func() error {
return input.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{Namespace: namespace.Name, Name: clusterName}, cluster)
}).Should(Succeed())

// Dump all the logs from the workload cluster before deleting it.
input.BootstrapClusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", cluster.Name))

framework.DeleteCluster(ctx, framework.DeleteClusterInput{
Deleter: input.BootstrapClusterProxy.GetClient(),
Cluster: cluster,
})

beforeClusterDeleteHandler(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"))

By("Checking all lifecycle hooks have been called")
// Assert that each hook has been called and returned "Success" during the test.
err = checkLifecycleHookResponses(ctx, input.BootstrapClusterProxy.GetClient(), namespace.Name, clusterName, map[string]string{
"BeforeClusterCreate": "Success",
"BeforeClusterUpgrade": "Success",
"BeforeClusterDelete": "Success",
"AfterControlPlaneInitialized": "Success",
"AfterControlPlaneUpgrade": "Success",
"AfterClusterUpgrade": "Success",
@@ -300,6 +318,7 @@ func responsesConfigMap(name string, namespace *corev1.Namespace) *corev1.Config
"BeforeClusterCreate-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
"BeforeClusterUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
"AfterControlPlaneUpgrade-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),
"BeforeClusterDelete-preloadedResponse": fmt.Sprintf(`{"Status": "Failure", "Message": %q}`, hookFailedMessage),

// Non-blocking hooks are set to Status:Success.
"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
@@ -347,9 +366,8 @@ func getLifecycleHookResponsesFromConfigMap(ctx context.Context, c client.Client
// beforeClusterCreateTestHandler calls runtimeHookTestHandler with a blockedCondition function which returns false if
// the Cluster has entered ClusterPhaseProvisioned.
func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
log.Logf("Blocking with BeforeClusterCreate hook")
hookName := "BeforeClusterCreate"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
blocked := true
// This hook should block the Cluster from entering the "Provisioned" state.
cluster := &clusterv1.Cluster{}
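// The diff collapses the remainder of this blocking condition. A sketch of
// the elided check, assuming it unblocks once the Cluster reports the
// Provisioned phase (not necessarily the exact upstream implementation):
Eventually(func() error {
	return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
}).Should(Succeed())
if cluster.Status.GetTypedPhase() == clusterv1.ClusterPhaseProvisioned {
	blocked = false
}
return blocked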
@@ -371,9 +389,8 @@ func beforeClusterCreateTestHandler(ctx context.Context, c client.Client, namesp
// beforeClusterUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if the
// Cluster has controlplanev1.RollingUpdateInProgressReason in its ReadyCondition.
func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
log.Logf("Blocking with BeforeClusterUpgrade hook")
hookName := "BeforeClusterUpgrade"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
var blocked = true

cluster := &clusterv1.Cluster{}
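// The diff collapses the remainder of this blocking condition. A sketch of
// the elided check, assuming it reads the reason on the Cluster's Ready
// condition as the doc comment above describes:
Eventually(func() error {
	return c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster)
}).Should(Succeed())
if conditions.GetReason(cluster, clusterv1.ReadyCondition) == controlplanev1.RollingUpdateInProgressReason {
	blocked = false
}
return blocked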
@@ -397,9 +414,8 @@ func beforeClusterUpgradeTestHandler(ctx context.Context, c client.Client, names
// afterControlPlaneUpgradeTestHandler calls runtimeHookTestHandler with a blocking function which returns false if any
// MachineDeployment in the Cluster has upgraded to the target Kubernetes version.
func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, namespace, clusterName, version string, intervals []interface{}) {
log.Logf("Blocking with AfterControlPlaneUpgrade hook")
hookName := "AfterControlPlaneUpgrade"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, func() bool {
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, true, func() bool {
var blocked = true
cluster := &clusterv1.Cluster{}
Eventually(func() error {
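// The diff cuts off mid-check here. A sketch of the remainder, assuming it
// lists the topology-owned MachineDeployments (as beforeClusterDeleteHandler
// below does) and unblocks once any carries the target version:
mds := &clusterv1.MachineDeploymentList{}
Eventually(func() error {
	return c.List(ctx, mds, client.MatchingLabels{
		clusterv1.ClusterLabelName:          clusterName,
		clusterv1.ClusterTopologyOwnedLabel: "",
	})
}).Should(Succeed())
for _, md := range mds.Items {
	if md.Spec.Template.Spec.Version != nil && *md.Spec.Template.Spec.Version == version {
		blocked = false
	}
}
return blocked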
@@ -429,15 +445,47 @@ func afterControlPlaneUpgradeTestHandler(ctx context.Context, c client.Client, n
}, intervals)
}

// beforeClusterDeleteHandler calls runtimeHookTestHandler with a blocking function which returns false once any of the
// MachineDeployments associated with the Cluster has a deletionTimestamp, or the Cluster itself is no longer found.
func beforeClusterDeleteHandler(ctx context.Context, c client.Client, namespace, clusterName string, intervals []interface{}) {
hookName := "BeforeClusterDelete"
runtimeHookTestHandler(ctx, c, namespace, clusterName, hookName, false, func() bool {
var blocked = true
mds := &clusterv1.MachineDeploymentList{}
Eventually(func() error {
return c.List(ctx, mds, client.MatchingLabels{
clusterv1.ClusterLabelName: clusterName,
clusterv1.ClusterTopologyOwnedLabel: "",
})
}).Should(Succeed())

// If any of the MachineDeployments has a deletion timestamp, the delete process is unblocked.
for _, md := range mds.Items {
if md.ObjectMeta.DeletionTimestamp != nil {
blocked = false
}
}

// If the Cluster is not found, it has been deleted and the hook is unblocked.
if apierrors.IsNotFound(c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, &clusterv1.Cluster{})) {
blocked = false
}

return blocked
}, intervals)
}

// runtimeHookTestHandler runs a series of tests in sequence to check if the runtimeHook passed to it succeeds.
// 1) Checks that the hook has been called at least once the TopologyReconciled condition is a Failure.
// 1) Checks that the hook has been called at least once and, when withTopologyReconciledCondition is set, that the TopologyReconciled condition is a Failure.
// 2) Check that the hook's blockingCondition is consistently true.
// - At this point the function sets the hook's response to be non-blocking.
// 3) Check that the hook's blocking condition becomes false.
// Note: runtimeHookTestHandler assumes that the hook passed to it is currently returning a blocking response.
// Updating the response to be non-blocking happens inline in the function.
func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, blockingCondition func() bool, intervals []interface{}) {
// Check that the LifecycleHook has been called at least once and the TopologyReconciled condition is a Failure.
func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clusterName, hookName string, withTopologyReconciledCondition bool, blockingCondition func() bool, intervals []interface{}) {
log.Logf("Blocking with %s hook", hookName)

// Check that the LifecycleHook has been called at least once and - when required - that the TopologyReconciled condition is a Failure.
Eventually(func() error {
if err := checkLifecycleHooksCalledAtLeastOnce(ctx, c, namespace, clusterName, []string{hookName}); err != nil {
return err
@@ -446,11 +494,14 @@ func runtimeHookTestHandler(ctx context.Context, c client.Client, namespace, clu
if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: clusterName}, cluster); err != nil {
return err
}
if !(conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) == clusterv1.TopologyReconcileFailedReason) {

// Check for the existence of the condition if withTopologyReconciledCondition is true.
if withTopologyReconciledCondition &&
(conditions.GetReason(cluster, clusterv1.TopologyReconciledCondition) != clusterv1.TopologyReconcileFailedReason) {
return errors.New("Condition not found on Cluster object")
}
return nil
}, 60*time.Second).Should(Succeed(), "%s has not been called", hookName)
}, intervals...).Should(Succeed(), "%s has not been called", hookName)

// blockingCondition should consistently be true as the Runtime hook is returning "Failure".
Consistently(func() bool {
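The tail of runtimeHookTestHandler is truncated above. Per its doc comment, the function flips the hook's preloaded response to non-blocking inline once the blocking condition has held; a minimal sketch of that step, reusing the "<hook>-preloadedResponse" key convention from responsesConfigMap (the ConfigMap name below is a hypothetical stand-in):

// Sketch only: patch the preloaded response from "Failure" to "Success" so
// the hook stops blocking. The ConfigMap name is an assumption.
configMap := &corev1.ConfigMap{}
configMapName := clusterName + "-hookresponses" // hypothetical name
Eventually(func() error {
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
		return err
	}
	patch := client.MergeFrom(configMap.DeepCopy())
	configMap.Data[hookName+"-preloadedResponse"] = `{"Status": "Success"}`
	return c.Patch(ctx, configMap, patch)
}).Should(Succeed())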
17 changes: 17 additions & 0 deletions test/extension/handlers/lifecycle/handlers.go
@@ -126,6 +126,23 @@ func (h *Handler) DoAfterClusterUpgrade(ctx context.Context, request *runtimehoo
}
}

// DoBeforeClusterDelete implements the BeforeClusterDelete hook.
func (h *Handler) DoBeforeClusterDelete(ctx context.Context, request *runtimehooksv1.BeforeClusterDeleteRequest, response *runtimehooksv1.BeforeClusterDeleteResponse) {
log := ctrl.LoggerFrom(ctx)
log.Info("BeforeClusterDelete is called")
cluster := request.Cluster

if err := h.readResponseFromConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterDelete, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
return
}
if err := h.recordCallInConfigMap(ctx, cluster.Name, cluster.Namespace, runtimehooksv1.BeforeClusterDelete, response); err != nil {
response.Status = runtimehooksv1.ResponseStatusFailure
response.Message = err.Error()
}
}

func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
hookName := runtimecatalog.HookName(hook)
configMap := &corev1.ConfigMap{}
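The body of readResponseFromConfigMap is collapsed above. A sketch of what it plausibly does, namely fetch the test ConfigMap and unmarshal the hook's preloaded response into the response object; the h.Client field and the ConfigMap name are assumptions, not confirmed by the diff:

// Sketch only; h.Client and the ConfigMap naming are assumptions.
func (h *Handler) readResponseFromConfigMap(ctx context.Context, name, namespace string, hook runtimecatalog.Hook, response runtimehooksv1.ResponseObject) error {
	hookName := runtimecatalog.HookName(hook)
	configMap := &corev1.ConfigMap{}
	configMapName := name + "-hookresponses" // hypothetical name
	if err := h.Client.Get(ctx, client.ObjectKey{Namespace: namespace, Name: configMapName}, configMap); err != nil {
		return errors.Wrapf(err, "failed to read ConfigMap %s/%s", namespace, configMapName)
	}
	return json.Unmarshal([]byte(configMap.Data[hookName+"-preloadedResponse"]), response)
}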
14 changes: 14 additions & 0 deletions test/extension/main.go
@@ -155,6 +155,7 @@ func main() {
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.AfterControlPlaneInitialized,
Name: "after-control-plane-initialized",
@@ -165,6 +166,7 @@
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.BeforeClusterUpgrade,
Name: "before-cluster-upgrade",
@@ -186,6 +188,7 @@
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.AfterClusterUpgrade,
Name: "after-cluster-upgrade",
@@ -197,6 +200,17 @@
os.Exit(1)
}

if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
Hook: runtimehooksv1.BeforeClusterDelete,
Name: "before-cluster-delete",
HandlerFunc: lifecycleHandler.DoBeforeClusterDelete,
TimeoutSeconds: pointer.Int32(5),
FailurePolicy: toPtr(runtimehooksv1.FailurePolicyFail),
}); err != nil {
setupLog.Error(err, "error adding handler")
os.Exit(1)
}

setupLog.Info("starting RuntimeExtension", "version", version.Get().String())
if err := webhookServer.Start(ctx); err != nil {
setupLog.Error(err, "error running webhook server")
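The registrations above pass the failure policy through a small toPtr helper that the diff does not show; presumably something like:

// Sketch of the unexported helper used above (assumption).
func toPtr(f runtimehooksv1.FailurePolicy) *runtimehooksv1.FailurePolicy {
	return &f
}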
