diff --git a/e2e/flags.go b/e2e/flags.go index f419367cda..c884ce85bf 100644 --- a/e2e/flags.go +++ b/e2e/flags.go @@ -65,6 +65,10 @@ var Stress = flag.Bool("stress", false, var KCC = flag.Bool("kcc", false, "If true, run kcc tests.") +// VPA enables running the e2e tests for vertical pod autoscaling. +var VPA = flag.Bool("vpa", false, + "If true, run VPA tests.") + // GceNode enables running the e2e tests for 'gcenode' auth type var GceNode = flag.Bool("gcenode", false, "If true, run test with 'gcenode' auth type.") diff --git a/e2e/nomostest/client.go b/e2e/nomostest/client.go index a3e3f3f94f..b7439ff689 100644 --- a/e2e/nomostest/client.go +++ b/e2e/nomostest/client.go @@ -30,6 +30,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" + autoscalingv1vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" "kpt.dev/configsync/e2e" "kpt.dev/configsync/e2e/nomostest/clusters" @@ -91,6 +92,7 @@ func newScheme(t testing.NTB) *runtime.Scheme { rbacv1beta1.SchemeBuilder, resourcegroupv1alpha1.SchemeBuilder.SchemeBuilder, apiregistrationv1.SchemeBuilder, + autoscalingv1vpa.SchemeBuilder, } for _, b := range builders { err := b.AddToScheme(s) diff --git a/e2e/nomostest/clusters/gke.go b/e2e/nomostest/clusters/gke.go index 37d6964326..7c448aa891 100644 --- a/e2e/nomostest/clusters/gke.go +++ b/e2e/nomostest/clusters/gke.go @@ -238,6 +238,9 @@ func createGKECluster(t testing.NTB, name string) error { if len(addons) > 0 { args = append(args, "--addons", strings.Join(addons, ",")) } + if *e2e.VPA { + args = append(args, "--enable-vertical-pod-autoscaling") + } } t.Logf("gcloud %s", strings.Join(args, " ")) cmd := exec.Command("gcloud", args...) diff --git a/e2e/nomostest/new.go b/e2e/nomostest/new.go index b12a0b4196..4055772685 100644 --- a/e2e/nomostest/new.go +++ b/e2e/nomostest/new.go @@ -85,6 +85,10 @@ func newOptStruct(testName, tmpDir string, t nomostesting.NTB, ntOptions ...ntop t.Skip("Test skipped since it is a KCC test") } + if !*e2e.VPA && optsStruct.VPATest { + t.Skip("Test skipped since it is a VPA test") + } + if !*e2e.GceNode && optsStruct.GCENodeTest { t.Skip("Test skipped since it is a test for GCENode auth type, which requires a GKE cluster without workload identity") } diff --git a/e2e/nomostest/ntopts/test_type.go b/e2e/nomostest/ntopts/test_type.go index d9e396d6fb..696208c355 100644 --- a/e2e/nomostest/ntopts/test_type.go +++ b/e2e/nomostest/ntopts/test_type.go @@ -25,6 +25,9 @@ type TestType struct { // KCCTest specifies the test is for KCC resources. KCCTest bool + // VPATest specifies the test requires VPA to be installed. + VPATest bool + // GCENodeTest specifies the test is for verifying the gcenode auth type. // It requires a GKE cluster with workload identity disabled. GCENodeTest bool @@ -45,6 +48,11 @@ func KCCTest(opt *New) { opt.KCCTest = true } +// VPATest specifies the test requires VPA to be installed. +func VPATest(opt *New) { + opt.VPATest = true +} + // GCENodeTest specifies the test is for verifying the gcenode auth type. 
func GCENodeTest(opt *New) { opt.GCENodeTest = true diff --git a/e2e/nomostest/testpredicates/predicates.go b/e2e/nomostest/testpredicates/predicates.go index 5867eceb78..798df378c6 100644 --- a/e2e/nomostest/testpredicates/predicates.go +++ b/e2e/nomostest/testpredicates/predicates.go @@ -256,6 +256,16 @@ func HasCorrectResourceRequestsLimits(containerName string, cpuRequest, cpuLimit if o == nil { return ErrObjectNotFound } + if uObj, ok := o.(*unstructured.Unstructured); ok { + rObj, err := kinds.ToTypedObject(uObj, core.Scheme) + if err != nil { + return err + } + o, err = kinds.ObjectAsClientObject(rObj) + if err != nil { + return err + } + } dep, ok := o.(*appsv1.Deployment) if !ok { return WrongTypeErr(dep, &appsv1.Deployment{}) } diff --git a/e2e/testcases/stress_test.go b/e2e/testcases/stress_test.go index 889f2e40cc..00e87f6e03 100644 --- a/e2e/testcases/stress_test.go +++ b/e2e/testcases/stress_test.go @@ -23,10 +23,12 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + autoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "kpt.dev/configsync/e2e/nomostest" "kpt.dev/configsync/e2e/nomostest/ntopts" nomostesting "kpt.dev/configsync/e2e/nomostest/testing" @@ -38,6 +40,8 @@ import ( "kpt.dev/configsync/pkg/kinds" "kpt.dev/configsync/pkg/metadata" "kpt.dev/configsync/pkg/testing/fake" + "kpt.dev/configsync/pkg/util/log" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -121,7 +125,7 @@ func TestStressCRD(t *testing.T) { } nt.T.Logf("Verify that there are exactly 1000 CronTab CRs managed by Config Sync on the cluster") - crList := &unstructured.UnstructuredList{} + crList := &metav1.PartialObjectMetadataList{} crList.SetGroupVersionKind(crontabGVK) if err := nt.KubeClient.List(crList, client.MatchingLabels{metadata.ManagedByKey: metadata.ManagedByValue}); err != nil { nt.T.Error(err) } @@ -161,19 +165,118 @@ func TestStressLargeNamespace(t *testing.T) { } nt.T.Log("Verify there are 5000 ConfigMaps in the namespace") - cmList := &corev1.ConfigMapList{} + cmList := &metav1.PartialObjectMetadataList{} + cmList.SetGroupVersionKind(kinds.ConfigMap()) + if err := nt.KubeClient.List(cmList, &client.ListOptions{Namespace: ns}, client.MatchingLabels{labelKey: labelValue}); err != nil { + nt.T.Error(err) + } + if len(cmList.Items) != 5000 { + nt.T.Errorf("The %s namespace should include 5000 ConfigMaps having the `%s: %s` label exactly, found %v instead", ns, labelKey, labelValue, len(cmList.Items)) + } +} + +// TestStressLargeNamespaceAutoscaling tests that Config Sync can sync a +// namespace including 5000 resources successfully, when autoscaling is set to +// Auto, with smaller initial resource requests. +// Ideally, the reconciler should be OOMKilled and/or evicted at least once and +// be replaced with more CPU/Mem.
+func TestStressLargeNamespaceAutoscaling(t *testing.T) { + nt := nomostest.New(t, nomostesting.Reconciliation1, ntopts.Unstructured, ntopts.StressTest, ntopts.VPATest, + ntopts.WithReconcileTimeout(configsync.DefaultReconcileTimeout)) + nt.T.Log("Stop the CS webhook by removing the webhook configuration") + nomostest.StopWebhook(nt) + + nt.T.Log("Enable autoscaling") + rootSync := fake.RootSyncObjectV1Beta1(configsync.RootSyncName) + if err := nt.KubeClient.Get(rootSync.Name, rootSync.Namespace, rootSync); err != nil { + nt.T.Fatal(err) + } + core.SetAnnotation(rootSync, metadata.ReconcilerAutoscalingStrategyAnnotationKey, string(metadata.ReconcilerAutoscalingStrategyAuto)) + reconcilerResourceSpec := v1beta1.ContainerResourcesSpec{ + ContainerName: "reconciler", + CPURequest: resource.MustParse("10m"), + CPULimit: resource.MustParse("1"), + MemoryRequest: resource.MustParse("5Mi"), + MemoryLimit: resource.MustParse("10Mi"), + } + rootSync.Spec.Override.Resources = []v1beta1.ContainerResourcesSpec{ + reconcilerResourceSpec, + } + if err := nt.KubeClient.Update(rootSync); err != nil { + nt.T.Fatal(err) + } + + // Wait for the reconciler Deployment to reflect the RootSync changes + reconcilerKey := core.RootReconcilerObjectKey(configsync.RootSyncName) + err := nt.Watcher.WatchObject(kinds.Deployment(), reconcilerKey.Name, reconcilerKey.Namespace, []testpredicates.Predicate{ + testpredicates.HasCorrectResourceRequestsLimits(reconcilerResourceSpec.ContainerName, + reconcilerResourceSpec.CPURequest, reconcilerResourceSpec.CPULimit, + reconcilerResourceSpec.MemoryRequest, reconcilerResourceSpec.MemoryLimit), + testpredicates.StatusEquals(nt.Scheme, kstatus.CurrentStatus), + }) + if err != nil { + nt.T.Fatal(err) + } + + if err := nt.WatchForAllSyncs(); err != nil { + nt.T.Fatal(err) + } + reconcilerPod, err := nt.KubeClient.GetDeploymentPod(reconcilerKey.Name, reconcilerKey.Namespace, nt.DefaultWaitTimeout) + if err != nil { + nt.T.Fatal(err) + } + nt.T.Log("Reconciler container specs (before):") + for _, container := range reconcilerPod.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } + + ns := "my-ns-1" + nt.Must(nt.RootRepos[configsync.RootSyncName].Add("acme/ns.yaml", fake.NamespaceObject(ns))) + + labelKey := "StressTestName" + labelValue := "TestStressLargeNamespaceAutoscaling" + for i := 1; i <= 5000; i++ { + nt.Must(nt.RootRepos[configsync.RootSyncName].Add(fmt.Sprintf("acme/cm-%d.yaml", i), fake.ConfigMapObject( + core.Name(fmt.Sprintf("cm-%d", i)), core.Namespace(ns), core.Label(labelKey, labelValue)))) + } + nt.Must(nt.RootRepos[configsync.RootSyncName].CommitAndPush("Add 5000 ConfigMaps and 1 Namespace")) + err = nt.WatchForAllSyncs(nomostest.WithTimeout(10 * time.Minute)) + if err != nil { + nt.T.Fatal(err) + } + + nt.T.Log("Verify there are 5000 ConfigMaps in the namespace") + cmList := &metav1.PartialObjectMetadataList{} + cmList.SetGroupVersionKind(kinds.ConfigMap()) if err := nt.KubeClient.List(cmList, &client.ListOptions{Namespace: ns}, client.MatchingLabels{labelKey: labelValue}); err != nil { nt.T.Error(err) } if len(cmList.Items) != 5000 { nt.T.Errorf("The %s namespace should include 5000 ConfigMaps having the `%s: %s` label exactly, found %v instead", ns, labelKey, labelValue, len(cmList.Items)) } + + reconcilerPod, err = nt.KubeClient.GetDeploymentPod(reconcilerKey.Name, reconcilerKey.Namespace, nt.DefaultWaitTimeout) + if err != nil { + nt.T.Fatal(err) + } + nt.T.Log("Reconciler container specs (after):") + for _, container := range 
reconcilerPod.Spec.Containers { + nt.T.Logf("%s: %s", container.Name, log.AsJSON(container.Resources)) + } + + vpa := &autoscalingv1.VerticalPodAutoscaler{} + if err := nt.KubeClient.Get(reconcilerKey.Name, reconcilerKey.Namespace, vpa); err != nil { + nt.T.Fatal(err) + } + nt.T.Log("Reconciler VPA recommendations:") + if vpa.Status.Recommendation != nil { + nt.T.Log(log.AsYAML(vpa.Status.Recommendation.ContainerRecommendations)) + } } // TestStressFrequentGitCommits adds 100 Git commits, and verifies that Config Sync can sync the changes in these commits successfully. func TestStressFrequentGitCommits(t *testing.T) { nt := nomostest.New(t, nomostesting.Reconciliation1, ntopts.Unstructured, ntopts.StressTest, ntopts.WithReconcileTimeout(configsync.DefaultReconcileTimeout)) nt.T.Log("Stop the CS webhook by removing the webhook configuration") nomostest.StopWebhook(nt) @@ -200,7 +303,8 @@ func TestStressFrequentGitCommits(t *testing.T) { } nt.T.Logf("Verify that there are exactly 100 ConfigMaps under the %s namespace", ns) - cmList := &corev1.ConfigMapList{} + cmList := &metav1.PartialObjectMetadataList{} + cmList.SetGroupVersionKind(kinds.ConfigMap()) if err := nt.KubeClient.List(cmList, &client.ListOptions{Namespace: ns}, client.MatchingLabels{labelKey: labelValue}); err != nil { nt.T.Error(err) } diff --git a/pkg/core/scheme.go b/pkg/core/scheme.go index dc21453f62..840b144853 100644 --- a/pkg/core/scheme.go +++ b/pkg/core/scheme.go @@ -17,6 +17,7 @@ package core import ( admissionv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" + autoscalingv1hpa "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -26,8 +27,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - // autoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + autoscalingv1vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/client-go/kubernetes/scheme" clusterregistry "k8s.io/cluster-registry/pkg/apis/clusterregistry/v1alpha1" k8sadmissionv1 "k8s.io/kubernetes/pkg/apis/admission/v1" @@ -62,7 +62,7 @@ var Scheme = scheme.Scheme func init() { mustRegisterKubernetesResources() mustRegisterAPIExtensionsResources() - // mustRegisterAutosclaingResources() + mustRegisterAutoscalingResources() // Config Sync types utilruntime.Must(clusterregistry.AddToScheme(scheme.Scheme)) @@ -126,7 +126,8 @@ func mustRegisterAPIExtensionsResources() { utilruntime.Must(scheme.Scheme.SetVersionPriority(apiextensionsv1.SchemeGroupVersion, apiextensionsv1beta1.SchemeGroupVersion)) } -// func mustRegisterAutosclaingResources() { -// utilruntime.Must(autoscalingv1.AddToScheme(scheme.Scheme)) -// // autoscaling API has no generated defaults or conversions -// } +func mustRegisterAutoscalingResources() { + utilruntime.Must(autoscalingv1hpa.AddToScheme(scheme.Scheme)) + utilruntime.Must(autoscalingv1vpa.AddToScheme(scheme.Scheme)) + // autoscaling API has no generated defaults or conversions +} diff --git a/pkg/reconcilermanager/controllers/reposync_controller.go b/pkg/reconcilermanager/controllers/reposync_controller.go index 2ca759b1cf..91573f01a7 100644 --- a/pkg/reconcilermanager/controllers/reposync_controller.go +++ 
b/pkg/reconcilermanager/controllers/reposync_controller.go @@ -225,13 +225,13 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile // Create secret in config-management-system namespace using the // existing secret in the reposync.namespace. if _, err := r.upsertAuthSecret(ctx, rs, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "upserting auth secret") } // Create secret in config-management-system namespace using the // existing secret in the reposync.namespace. if _, err := r.upsertCACertSecret(ctx, rs, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "upserting CA cert secret") } labelMap := map[string]string{ @@ -258,32 +258,32 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile return errors.Errorf("invalid source type: %s", rs.Spec.SourceType) } if _, err := r.upsertServiceAccount(ctx, reconcilerRef, auth, gcpSAEmail, labelMap); err != nil { - return err + return errors.Wrap(err, "upserting service account") } // Overwrite reconciler rolebinding. if _, err := r.upsertRoleBinding(ctx, reconcilerRef, rsRef); err != nil { - return err + return errors.Wrap(err, "upserting role binding") } // Upsert autoscaling config for the reconciler deployment autoscalingStrategy := reconcilerAutoscalingStrategy(rs) _, autoscale, err := r.upsertVerticalPodAutoscaler(ctx, autoscalingStrategy, reconcilerRef, labelMap) if err != nil { - return err + return errors.Wrap(err, "upserting autoscaler") } containerEnvs := r.populateContainerEnvs(ctx, rs, reconcilerRef.Name) newValuesFrom, err := copyConfigMapsToCms(ctx, r.client, rs) if err != nil { - return errors.Errorf("unable to copy ConfigMapRefs to config-management-system namespace: %s", err.Error()) + return errors.Wrap(err, "copying ConfigMapRefs to config-management-system namespace") } mut := r.mutationsFor(ctx, rs, containerEnvs, newValuesFrom, autoscale) // Upsert Namespace reconciler deployment. 
deployObj, op, err := r.upsertDeployment(ctx, reconcilerRef, labelMap, mut) if err != nil { - return err + return errors.Wrap(err, "upserting reconciler deployment") } rs.Status.Reconciler = reconcilerRef.Name @@ -292,7 +292,7 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile if op == controllerutil.OperationResultNone { deployObj, err = r.deployment(ctx, reconcilerRef) if err != nil { - return err + return errors.Wrap(err, "getting reconciler deployment") } } @@ -303,7 +303,7 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile result, err := kstatus.Compute(deployObj) if err != nil { - return errors.Wrap(err, "computing reconciler Deployment status failed") + return errors.Wrap(err, "computing reconciler deployment status") } r.logger(ctx).V(3).Info("Reconciler status", @@ -329,6 +329,9 @@ func (r *RepoSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile // - Update the RepoSync status func (r *RepoSyncReconciler) setup(ctx context.Context, reconcilerRef, rsRef types.NamespacedName, currentRS, rs *v1beta1.RepoSync) error { err := r.upsertManagedObjects(ctx, reconcilerRef, rs) + if err != nil { + err = errors.Wrap(err, "upserting managed objects") + } err = r.handleReconcileError(ctx, err, rs, "Setup") updated, updateErr := r.updateStatus(ctx, currentRS, rs) if updateErr != nil { @@ -357,6 +360,9 @@ func (r *RepoSyncReconciler) setup(ctx context.Context, reconcilerRef, rsRef typ // - Update the RepoSync status func (r *RepoSyncReconciler) teardown(ctx context.Context, reconcilerRef, rsRef types.NamespacedName, currentRS, rs *v1beta1.RepoSync) error { err := r.deleteManagedObjects(ctx, reconcilerRef, rsRef) + if err != nil { + err = errors.Wrap(err, "deleting managed objects") + } err = r.handleReconcileError(ctx, err, rs, "Teardown") updated, updateErr := r.updateStatus(ctx, currentRS, rs) if updateErr != nil { @@ -432,11 +438,11 @@ func (r *RepoSyncReconciler) deleteManagedObjects(ctx context.Context, reconcile r.logger(ctx).Info("Deleting managed objects") if err := r.deleteVerticalPodAutoscaler(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting autoscaler") } if err := r.deleteDeployment(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting reconciler deployment") } // Note: ConfigMaps have been replaced by Deployment env vars. @@ -444,18 +450,22 @@ func (r *RepoSyncReconciler) deleteManagedObjects(ctx context.Context, reconcile // This deletion remains to clean up after users upgrade. if err := r.deleteConfigMaps(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting config maps") } if err := r.deleteSecrets(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting secrets") } if err := r.deleteRoleBinding(ctx, reconcilerRef, rsRef); err != nil { - return err + return errors.Wrap(err, "deleting role bindings") + } + + if err := r.deleteServiceAccount(ctx, reconcilerRef); err != nil { + return errors.Wrap(err, "deleting service account") } - return r.deleteServiceAccount(ctx, reconcilerRef) + return nil } // SetupWithManager registers RepoSync controller with reconciler-manager. 
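Reviewer note (not part of the patch): the new `if err != nil { err = errors.Wrap(...) }` guards added in setup() and teardown() above are safe but not strictly required, because github.com/pkg/errors.Wrap already returns nil when given a nil error. A minimal standalone sketch demonstrating that behavior; the messages are reused from this patch only for illustration.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// Wrapping a nil error yields nil, so the wrapped value can be passed
	// straight to a handler that tolerates a nil error.
	var err error
	fmt.Println(errors.Wrap(err, "upserting managed objects")) // prints: <nil>

	// Wrapping a non-nil error prepends the message.
	err = errors.New("boom")
	fmt.Println(errors.Wrap(err, "upserting managed objects")) // prints: upserting managed objects: boom
}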
diff --git a/pkg/reconcilermanager/controllers/rootsync_controller.go b/pkg/reconcilermanager/controllers/rootsync_controller.go index 2609b0263d..9d3020ac41 100644 --- a/pkg/reconcilermanager/controllers/rootsync_controller.go +++ b/pkg/reconcilermanager/controllers/rootsync_controller.go @@ -240,19 +240,19 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile return errors.Errorf("invalid source type: %s", rs.Spec.SourceType) } if _, err := r.upsertServiceAccount(ctx, reconcilerRef, auth, gcpSAEmail, labelMap); err != nil { - return err + return errors.Wrap(err, "upserting service account") } // Overwrite reconciler clusterrolebinding. if _, err := r.upsertClusterRoleBinding(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "upserting cluster role binding") } // Upsert autoscaling config for the reconciler deployment autoscalingStrategy := reconcilerAutoscalingStrategy(rs) _, autoscale, err := r.upsertVerticalPodAutoscaler(ctx, autoscalingStrategy, reconcilerRef, labelMap) if err != nil { - return err + return errors.Wrap(err, "upserting autoscaler") } containerEnvs := r.populateContainerEnvs(ctx, rs, reconcilerRef.Name) @@ -261,7 +261,7 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile // Upsert Root reconciler deployment. deployObj, op, err := r.upsertDeployment(ctx, reconcilerRef, labelMap, mut) if err != nil { - return err + return errors.Wrap(err, "upserting reconciler deployment") } rs.Status.Reconciler = reconcilerRef.Name @@ -270,7 +270,7 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile if op == controllerutil.OperationResultNone { deployObj, err = r.deployment(ctx, reconcilerRef) if err != nil { - return err + return errors.Wrap(err, "getting reconciler deployment") } } @@ -281,7 +281,7 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile result, err := kstatus.Compute(deployObj) if err != nil { - return errors.Wrap(err, "computing reconciler Deployment status failed") + return errors.Wrap(err, "computing reconciler deployment status") } r.logger(ctx).V(3).Info("Reconciler status", @@ -307,6 +307,9 @@ func (r *RootSyncReconciler) upsertManagedObjects(ctx context.Context, reconcile // - Update the RootSync status func (r *RootSyncReconciler) setup(ctx context.Context, reconcilerRef, rsRef types.NamespacedName, currentRS, rs *v1beta1.RootSync) error { err := r.upsertManagedObjects(ctx, reconcilerRef, rs) + if err != nil { + err = errors.Wrap(err, "upserting managed objects") + } err = r.handleReconcileError(ctx, err, rs, "Setup") updated, updateErr := r.updateStatus(ctx, currentRS, rs) if updateErr != nil { @@ -335,6 +338,9 @@ func (r *RootSyncReconciler) setup(ctx context.Context, reconcilerRef, rsRef typ // - Update the RootSync status func (r *RootSyncReconciler) teardown(ctx context.Context, reconcilerRef, rsRef types.NamespacedName, currentRS, rs *v1beta1.RootSync) error { err := r.deleteManagedObjects(ctx, reconcilerRef) + if err != nil { + err = errors.Wrap(err, "deleting managed objects") + } err = r.handleReconcileError(ctx, err, rs, "Teardown") updated, updateErr := r.updateStatus(ctx, currentRS, rs) if updateErr != nil { @@ -410,11 +416,11 @@ func (r *RootSyncReconciler) deleteManagedObjects(ctx context.Context, reconcile r.logger(ctx).Info("Deleting managed objects") if err := r.deleteVerticalPodAutoscaler(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting autoscaler") } if err := 
r.deleteDeployment(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting reconciler deployment") } // Note: ConfigMaps have been replaced by Deployment env vars. @@ -422,17 +428,21 @@ func (r *RootSyncReconciler) deleteManagedObjects(ctx context.Context, reconcile // This deletion remains to clean up after users upgrade. if err := r.deleteConfigMaps(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting config maps") } // Note: ReconcilerManager doesn't manage the RootSync Secret. // So we don't need to delete it here. if err := r.deleteClusterRoleBinding(ctx, reconcilerRef); err != nil { - return err + return errors.Wrap(err, "deleting cluster role bindings") + } + + if err := r.deleteServiceAccount(ctx, reconcilerRef); err != nil { + return errors.Wrap(err, "deleting service account") } - return r.deleteServiceAccount(ctx, reconcilerRef) + return nil } // SetupWithManager registers RootSync controller with reconciler-manager.
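Appendix (not part of the patch): for readers unfamiliar with the vertical-pod-autoscaler v1 API registered above, the sketch below shows the general shape of a VerticalPodAutoscaler that an upsertVerticalPodAutoscaler implementation could create for a reconciler Deployment when the autoscaling strategy is Auto. The helper name buildReconcilerVPA, the example names, and the omission of a resource policy are assumptions for illustration, not the actual controller code in this diff.

package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	vpav1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
)

// buildReconcilerVPA (hypothetical) returns a VPA that targets the named
// reconciler Deployment and lets the VPA updater apply recommendations
// automatically, matching the "Auto" strategy exercised by
// TestStressLargeNamespaceAutoscaling.
func buildReconcilerVPA(name, namespace string, labels map[string]string) *vpav1.VerticalPodAutoscaler {
	updateMode := vpav1.UpdateModeAuto
	return &vpav1.VerticalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,      // e.g. "root-reconciler"
			Namespace: namespace, // e.g. "config-management-system"
			Labels:    labels,
		},
		Spec: vpav1.VerticalPodAutoscalerSpec{
			// TargetRef points the VPA recommender at the reconciler Deployment.
			TargetRef: &autoscalingv1.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       name,
			},
			// "Auto" allows the updater to evict pods and apply new requests,
			// which is what lets an OOMKilled reconciler come back with more
			// CPU/memory in the stress test.
			UpdatePolicy: &vpav1.PodUpdatePolicy{
				UpdateMode: &updateMode,
			},
		},
	}
}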