diff --git a/controllers/machine_controller.go b/controllers/machine_controller.go
index 7c85191e5607..4b7b4973d9c1 100644
--- a/controllers/machine_controller.go
+++ b/controllers/machine_controller.go
@@ -36,7 +36,6 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	"sigs.k8s.io/cluster-api/controllers/noderefutil"
 	"sigs.k8s.io/cluster-api/controllers/remote"
-	capierrors "sigs.k8s.io/cluster-api/errors"
 	kubedrain "sigs.k8s.io/cluster-api/third_party/kubernetes-drain"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
@@ -333,10 +332,12 @@ func (r *MachineReconciler) reconcileDelete(ctx context.Context, cluster *cluste
 			return ctrl.Result{}, errors.Wrap(err, "failed to patch Machine")
 		}
 
-		if err := r.drainNode(ctx, cluster, m.Status.NodeRef.Name); err != nil {
-			conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
-			r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDrainNode", "error draining Machine's node %q: %v", m.Status.NodeRef.Name, err)
-			return ctrl.Result{}, err
+		if result, err := r.drainNode(ctx, cluster, m.Status.NodeRef.Name); !result.IsZero() || err != nil {
+			if err != nil {
+				conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
+				r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDrainNode", "error draining Machine's node %q: %v", m.Status.NodeRef.Name, err)
+			}
+			return result, err
 		}
 
 		conditions.MarkTrue(m, clusterv1.DrainingSucceededCondition)
@@ -489,18 +490,18 @@ func (r *MachineReconciler) isDeleteNodeAllowed(ctx context.Context, cluster *cl
 	}
 }
 
-func (r *MachineReconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) error {
+func (r *MachineReconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name, "node", nodeName)
 
 	restConfig, err := remote.RESTConfig(ctx, MachineControllerName, r.Client, util.ObjectKey(cluster))
 	if err != nil {
 		log.Error(err, "Error creating a remote client while deleting Machine, won't retry")
-		return nil
+		return ctrl.Result{}, nil
 	}
 	kubeClient, err := kubernetes.NewForConfig(restConfig)
 	if err != nil {
 		log.Error(err, "Error creating a remote client while deleting Machine, won't retry")
-		return nil
+		return ctrl.Result{}, nil
 	}
 
 	node, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
@@ -508,9 +509,9 @@ func (r *MachineReconciler) drainNode(ctx context.Context, cluster *clusterv1.Cl
 		if apierrors.IsNotFound(err) {
 			// If an admin deletes the node directly, we'll end up here.
 			log.Error(err, "Could not find node from noderef, it may have already been deleted")
-			return nil
+			return ctrl.Result{}, nil
 		}
-		return errors.Errorf("unable to get node %q: %v", nodeName, err)
+		return ctrl.Result{}, errors.Errorf("unable to get node %q: %v", nodeName, err)
 	}
 
 	drainer := &kubedrain.Helper{
@@ -543,17 +544,17 @@ func (r *MachineReconciler) drainNode(ctx context.Context, cluster *clusterv1.Cl
 	if err := kubedrain.RunCordonOrUncordon(ctx, drainer, node, true); err != nil {
 		// Machine will be re-reconciled after a cordon failure.
 		log.Error(err, "Cordon failed")
-		return errors.Errorf("unable to cordon node %s: %v", node.Name, err)
+		return ctrl.Result{}, errors.Errorf("unable to cordon node %s: %v", node.Name, err)
 	}
 
 	if err := kubedrain.RunNodeDrain(ctx, drainer, node.Name); err != nil {
 		// Machine will be re-reconciled after a drain failure.
-		log.Error(err, "Drain failed")
-		return &capierrors.RequeueAfterError{RequeueAfter: 20 * time.Second}
+		log.Error(err, "Drain failed, retry in 20s")
+		return ctrl.Result{RequeueAfter: 20 * time.Second}, nil
 	}
 
 	log.Info("Drain successful")
-	return nil
+	return ctrl.Result{}, nil
 }
 
 func (r *MachineReconciler) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, name string) error {
diff --git a/controllers/machine_controller_phases.go b/controllers/machine_controller_phases.go
index a0cd730f1b7f..bd707944fd9f 100644
--- a/controllers/machine_controller_phases.go
+++ b/controllers/machine_controller_phases.go
@@ -19,7 +19,6 @@ package controllers
 import (
 	"context"
 	"fmt"
-	"strings"
 	"time"
 
 	"github.com/pkg/errors"
@@ -97,9 +96,8 @@ func (r *MachineReconciler) reconcileExternal(ctx context.Context, cluster *clus
 	obj, err := external.Get(ctx, r.Client, ref, m.Namespace)
 	if err != nil {
 		if apierrors.IsNotFound(errors.Cause(err)) {
-			return external.ReconcileOutput{}, errors.Wrapf(&capierrors.RequeueAfterError{RequeueAfter: externalReadyWait},
-				"could not find %v %q for Machine %q in namespace %q, requeuing",
-				ref.GroupVersionKind(), ref.Name, m.Name, m.Namespace)
+			log.Info("could not find external ref, requeueing", "RefGVK", ref.GroupVersionKind(), "RefName", ref.Name, "Machine", m.Name, "Namespace", m.Namespace)
+			return external.ReconcileOutput{RequeueAfter: externalReadyWait}, nil
 		}
 		return external.ReconcileOutput{}, err
 	}
@@ -192,6 +190,9 @@ func (r *MachineReconciler) reconcileBootstrap(ctx context.Context, cluster *clu
 	if err != nil {
 		return ctrl.Result{}, err
 	}
+	if externalResult.RequeueAfter > 0 {
+		return ctrl.Result{RequeueAfter: externalResult.RequeueAfter}, nil
+	}
 	if externalResult.Paused {
 		return ctrl.Result{}, nil
 	}
@@ -240,14 +241,18 @@ func (r *MachineReconciler) reconcileInfrastructure(ctx context.Context, cluster
 	// Call generic external reconciler.
 	infraReconcileResult, err := r.reconcileExternal(ctx, cluster, m, &m.Spec.InfrastructureRef)
 	if err != nil {
-		if m.Status.InfrastructureReady && strings.Contains(err.Error(), "could not find") {
-			// Infra object went missing after the machine was up and running
+		return ctrl.Result{}, err
+	}
+	if infraReconcileResult.RequeueAfter > 0 {
+		// Infra object went missing after the machine was up and running
+		if m.Status.InfrastructureReady {
 			log.Error(err, "Machine infrastructure reference has been deleted after being ready, setting failure state")
 			m.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.InvalidConfigurationMachineError)
 			m.Status.FailureMessage = pointer.StringPtr(fmt.Sprintf("Machine infrastructure resource %v with name %q has been deleted after being ready", m.Spec.InfrastructureRef.GroupVersionKind(), m.Spec.InfrastructureRef.Name))
+			return ctrl.Result{}, errors.Errorf("could not find %v %q for Machine %q in namespace %q, requeueing", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, m.Name, m.Namespace)
 		}
-		return ctrl.Result{}, err
+		return ctrl.Result{RequeueAfter: infraReconcileResult.RequeueAfter}, nil
 	}
 
 	// if the external object is paused, return without any further processing
 	if infraReconcileResult.Paused {
diff --git a/controllers/machine_controller_phases_test.go b/controllers/machine_controller_phases_test.go
index b02abee2a170..5eb6d192f0ae 100644
--- a/controllers/machine_controller_phases_test.go
+++ b/controllers/machine_controller_phases_test.go
@@ -587,9 +587,9 @@ func TestReconcileBootstrap(t *testing.T) {
 		name            string
 		bootstrapConfig map[string]interface{}
 		machine         *clusterv1.Machine
+		expectResult    ctrl.Result
 		expectError     bool
 		expected        func(g *WithT, m *clusterv1.Machine)
-		result          *ctrl.Result
 	}{
 		{
 			name: "new machine, bootstrap config ready with data",
@@ -606,7 +606,8 @@ func TestReconcileBootstrap(t *testing.T) {
 					"dataSecretName": "secret-data",
 				},
 			},
-			expectError: false,
+			expectResult: ctrl.Result{},
+			expectError: false,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.Status.BootstrapReady).To(BeTrue())
 				g.Expect(m.Spec.Bootstrap.DataSecretName).ToNot(BeNil())
@@ -627,7 +628,8 @@ func TestReconcileBootstrap(t *testing.T) {
 					"ready": true,
 				},
 			},
-			expectError: true,
+			expectResult: ctrl.Result{},
+			expectError: true,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.Status.BootstrapReady).To(BeFalse())
 				g.Expect(m.Spec.Bootstrap.DataSecretName).To(BeNil())
@@ -645,8 +647,8 @@ func TestReconcileBootstrap(t *testing.T) {
 				"spec": map[string]interface{}{},
 				"status": map[string]interface{}{},
 			},
-			expectError: false,
-			result: &ctrl.Result{RequeueAfter: externalReadyWait},
+			expectResult: ctrl.Result{RequeueAfter: externalReadyWait},
+			expectError: false,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.Status.BootstrapReady).To(BeFalse())
 			},
@@ -663,7 +665,8 @@ func TestReconcileBootstrap(t *testing.T) {
 				"spec": map[string]interface{}{},
 				"status": map[string]interface{}{},
 			},
-			expectError: true,
+			expectResult: ctrl.Result{RequeueAfter: externalReadyWait},
+			expectError: false,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.Status.BootstrapReady).To(BeFalse())
 			},
@@ -680,7 +683,8 @@ func TestReconcileBootstrap(t *testing.T) {
 				"spec": map[string]interface{}{},
 				"status": map[string]interface{}{},
 			},
-			expectError: true,
+			expectResult: ctrl.Result{RequeueAfter: externalReadyWait},
+			expectError: false,
 		},
 		{
 			name: "existing machine, bootstrap data should not change",
@@ -716,7 +720,8 @@ func TestReconcileBootstrap(t *testing.T) {
 					BootstrapReady: true,
 				},
 			},
-			expectError: false,
+			expectResult: ctrl.Result{},
+			expectError: false,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.Status.BootstrapReady).To(BeTrue())
 				g.Expect(*m.Spec.Bootstrap.DataSecretName).To(BeEquivalentTo("secret-data"))
@@ -763,8 +768,8 @@ func TestReconcileBootstrap(t *testing.T) {
 					BootstrapReady: true,
 				},
 			},
-			expectError: false,
-			result: &ctrl.Result{RequeueAfter: externalReadyWait},
+			expectResult: ctrl.Result{RequeueAfter: externalReadyWait},
+			expectError: false,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.GetOwnerReferences()).NotTo(ContainRefOfGroupKind("cluster.x-k8s.io", "MachineSet"))
 			},
@@ -810,7 +815,8 @@ func TestReconcileBootstrap(t *testing.T) {
 					BootstrapReady: true,
 				},
 			},
-			expectError: true,
+			expectResult: ctrl.Result{},
+			expectError: true,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.GetOwnerReferences()).NotTo(ContainRefOfGroupKind("cluster.x-k8s.io", "MachineSet"))
 			},
@@ -839,6 +845,7 @@ func TestReconcileBootstrap(t *testing.T) {
 			}
 
 			res, err := r.reconcileBootstrap(ctx, defaultCluster, tc.machine)
+			g.Expect(res).To(Equal(tc.expectResult))
 			if tc.expectError {
 				g.Expect(err).ToNot(BeNil())
 			} else {
@@ -848,10 +855,6 @@ func TestReconcileBootstrap(t *testing.T) {
 			if tc.expected != nil {
 				tc.expected(g, tc.machine)
 			}
-
-			if tc.result != nil {
-				g.Expect(res).To(Equal(*tc.result))
-			}
 		})
 	}
 }
@@ -889,14 +892,14 @@ func TestReconcileInfrastructure(t *testing.T) {
 	}
 
 	testCases := []struct {
-		name               string
-		bootstrapConfig    map[string]interface{}
-		infraConfig        map[string]interface{}
-		machine            *clusterv1.Machine
-		expectError        bool
-		expectChanged      bool
-		expectRequeueAfter bool
-		expected           func(g *WithT, m *clusterv1.Machine)
+		name            string
+		bootstrapConfig map[string]interface{}
+		infraConfig     map[string]interface{}
+		machine         *clusterv1.Machine
+		expectResult    ctrl.Result
+		expectError     bool
+		expectChanged   bool
+		expected        func(g *WithT, m *clusterv1.Machine)
 	}{
 		{
 			name: "new machine, infrastructure config ready",
@@ -933,6 +936,7 @@ func TestReconcileInfrastructure(t *testing.T) {
 					},
 				},
 			},
+			expectResult: ctrl.Result{},
 			expectError: false,
 			expectChanged: true,
 			expected: func(g *WithT, m *clusterv1.Machine) {
@@ -985,8 +989,8 @@ func TestReconcileInfrastructure(t *testing.T) {
 				"apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha4",
 				"metadata": map[string]interface{}{},
 			},
-			expectError: true,
-			expectRequeueAfter: true,
+			expectResult: ctrl.Result{},
+			expectError: true,
 			expected: func(g *WithT, m *clusterv1.Machine) {
 				g.Expect(m.Status.InfrastructureReady).To(BeTrue())
 				g.Expect(m.Status.FailureMessage).ToNot(BeNil())
@@ -1023,6 +1027,7 @@ func TestReconcileInfrastructure(t *testing.T) {
 					},
 				},
 			},
+			expectResult: ctrl.Result{},
 			expectError: false,
 			expectChanged: false,
 			expected: func(g *WithT, m *clusterv1.Machine) {
@@ -1052,8 +1057,9 @@ func TestReconcileInfrastructure(t *testing.T) {
 				).Build(),
 			}
 
-			_, err := r.reconcileInfrastructure(ctx, defaultCluster, tc.machine)
+			result, err := r.reconcileInfrastructure(ctx, defaultCluster, tc.machine)
 			r.reconcilePhase(ctx, tc.machine)
+			g.Expect(result).To(Equal(tc.expectResult))
 			if tc.expectError {
 				g.Expect(err).ToNot(BeNil())
 			} else {
diff --git a/controlplane/kubeadm/controllers/controller.go b/controlplane/kubeadm/controllers/controller.go
index a737347992a3..2ebe94ca7678 100644
--- a/controlplane/kubeadm/controllers/controller.go
+++ b/controlplane/kubeadm/controllers/controller.go
@@ -36,7 +36,6 @@ import (
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/machinefilters"
-	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -161,13 +160,6 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 	}
 
 	defer func() {
-		if requeueErr, ok := errors.Cause(reterr).(capierrors.HasRequeueAfterError); ok {
-			if res.RequeueAfter == 0 {
-				res.RequeueAfter = requeueErr.GetRequeueAfter()
-				reterr = nil
-			}
-		}
-
 		// Always attempt to update status.
 		if err := r.updateStatus(ctx, kcp, cluster); err != nil {
 			var connFailure *internal.RemoteClusterConnectionError
@@ -265,9 +257,11 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	// Generate Cluster Kubeconfig if needed
-	if err := r.reconcileKubeconfig(ctx, cluster, kcp); err != nil {
-		log.Error(err, "failed to reconcile Kubeconfig")
-		return ctrl.Result{}, err
+	if result, err := r.reconcileKubeconfig(ctx, cluster, kcp); !result.IsZero() || err != nil {
+		if err != nil {
+			log.Error(err, "failed to reconcile Kubeconfig")
+		}
+		return result, err
 	}
 
 	controlPlaneMachines, err := r.managementClusterUncached.GetMachinesForCluster(ctx, util.ObjectKey(cluster), machinefilters.ControlPlaneMachines(cluster.Name))
diff --git a/controlplane/kubeadm/controllers/helpers.go b/controlplane/kubeadm/controllers/helpers.go
index 907875730e2d..cac2d01ccd45 100644
--- a/controlplane/kubeadm/controllers/helpers.go
+++ b/controlplane/kubeadm/controllers/helpers.go
@@ -33,7 +33,6 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
-	capierrors "sigs.k8s.io/cluster-api/errors"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/certs"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -43,12 +42,12 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 )
 
-func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) error {
+func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 
 	endpoint := cluster.Spec.ControlPlaneEndpoint
 	if endpoint.IsZero() {
-		return nil
+		return ctrl.Result{}, nil
 	}
 
 	controllerOwnerRef := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
@@ -64,41 +63,40 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context,
 			controllerOwnerRef,
 		)
 		if errors.Is(createErr, kubeconfig.ErrDependentCertificateNotFound) {
-			return errors.Wrapf(&capierrors.RequeueAfterError{RequeueAfter: dependentCertRequeueAfter},
-				"could not find secret %q, requeuing", secret.ClusterCA)
+			return ctrl.Result{RequeueAfter: dependentCertRequeueAfter}, nil
 		}
 		// always return if we have just created in order to skip rotation checks
-		return createErr
+		return ctrl.Result{}, createErr
 	case err != nil:
-		return errors.Wrap(err, "failed to retrieve kubeconfig Secret")
+		return ctrl.Result{}, errors.Wrap(err, "failed to retrieve kubeconfig Secret")
 	}
 
 	// check if the kubeconfig secret was created by v1alpha2 controllers, and thus it has the Cluster as the owner instead of KCP;
 	// if yes, adopt it.
 	if util.IsOwnedByObject(configSecret, cluster) && !util.IsControlledBy(configSecret, kcp) {
 		if err := r.adoptKubeconfigSecret(ctx, cluster, configSecret, controllerOwnerRef); err != nil {
-			return err
+			return ctrl.Result{}, err
 		}
 	}
 
 	// only do rotation on owned secrets
 	if !util.IsControlledBy(configSecret, kcp) {
-		return nil
+		return ctrl.Result{}, nil
 	}
 
 	needsRotation, err := kubeconfig.NeedsClientCertRotation(configSecret, certs.ClientCertificateRenewalDuration)
 	if err != nil {
-		return err
+		return ctrl.Result{}, err
 	}
 
 	if needsRotation {
 		log.Info("rotating kubeconfig secret")
 		if err := kubeconfig.RegenerateSecret(ctx, r.Client, configSecret); err != nil {
-			return errors.Wrap(err, "failed to regenerate kubeconfig")
+			return ctrl.Result{}, errors.Wrap(err, "failed to regenerate kubeconfig")
 		}
 	}
 
-	return nil
+	return ctrl.Result{}, nil
 }
 
 func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, controllerOwnerRef metav1.OwnerReference) error {
diff --git a/controlplane/kubeadm/controllers/helpers_test.go b/controlplane/kubeadm/controllers/helpers_test.go
index 3ab122ab2fe3..7f817575e79b 100644
--- a/controlplane/kubeadm/controllers/helpers_test.go
+++ b/controlplane/kubeadm/controllers/helpers_test.go
@@ -35,6 +35,7 @@ import (
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
 	"sigs.k8s.io/cluster-api/util/secret"
+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
@@ -76,7 +77,9 @@ func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) {
 		recorder: record.NewFakeRecorder(32),
 	}
 
-	g.Expect(r.reconcileKubeconfig(ctx, cluster, kcp)).To(Succeed())
+	result, err := r.reconcileKubeconfig(ctx, cluster, kcp)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(result).To(BeZero())
 
 	kubeconfigSecret := &corev1.Secret{}
 	secretName := client.ObjectKey{
@@ -123,7 +126,9 @@ func TestReconcileKubeconfigMissingCACertificate(t *testing.T) {
 		recorder: record.NewFakeRecorder(32),
 	}
 
-	g.Expect(r.reconcileKubeconfig(ctx, cluster, kcp)).NotTo(Succeed())
+	result, err := r.reconcileKubeconfig(ctx, cluster, kcp)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: dependentCertRequeueAfter}))
 
 	kubeconfigSecret := &corev1.Secret{}
 	secretName := client.ObjectKey{
@@ -181,7 +186,9 @@ func TestReconcileKubeconfigSecretAdoptsV1alpha2Secrets(t *testing.T) {
 		recorder: record.NewFakeRecorder(32),
 	}
 
-	g.Expect(r.reconcileKubeconfig(ctx, cluster, kcp)).To(Succeed())
+	result, err := r.reconcileKubeconfig(ctx, cluster, kcp)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(result).To(Equal(ctrl.Result{}))
 
 	kubeconfigSecret := &corev1.Secret{}
 	secretName := client.ObjectKey{
@@ -243,7 +250,9 @@ func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) {
 		recorder: record.NewFakeRecorder(32),
 	}
 
-	g.Expect(r.reconcileKubeconfig(ctx, cluster, kcp)).To(Succeed())
+	result, err := r.reconcileKubeconfig(ctx, cluster, kcp)
+	g.Expect(err).To(Succeed())
+	g.Expect(result).To(BeZero())
 
 	kubeconfigSecret := &corev1.Secret{}
 	secretName := client.ObjectKey{
@@ -300,7 +309,9 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) {
 		Client: fakeClient,
 		recorder: record.NewFakeRecorder(32),
 	}
-	g.Expect(r.reconcileKubeconfig(ctx, cluster, kcp)).To(Succeed())
+	result, err := r.reconcileKubeconfig(ctx, cluster, kcp)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(result).To(Equal(ctrl.Result{}))
 
 	kubeconfigSecret := &corev1.Secret{}
 	secretName := client.ObjectKey{
diff --git a/errors/controllers.go b/errors/controllers.go
deleted file mode 100644
index 6fc01576fb9c..000000000000
--- a/errors/controllers.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package errors
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/pkg/errors"
-)
-
-// HasRequeueAfterError represents that an actuator managed object should
-// be requeued for further processing after the given RequeueAfter time has
-// passed.
-//
-// DEPRECATED: This error is deprecated and should not be used for new code.
-// See https://github.com/kubernetes-sigs/cluster-api/issues/3370 for more information.
-//
-// Users should switch their methods and functions to return a (ctrl.Result, error) pair,
-// instead of relying on this error. Controller runtime exposes a Result.IsZero() (from 0.5.9, and 0.6.2)
-// which can be used from callers to see if reconciliation should be stopped or continue.
-type HasRequeueAfterError interface {
-	// GetRequeueAfter gets the duration to wait until the managed object is
-	// requeued for further processing.
-	GetRequeueAfter() time.Duration
-}
-
-// RequeueAfterError represents that an actuator managed object should be
-// requeued for further processing after the given RequeueAfter time has
-// passed.
-//
-// DEPRECATED: This error is deprecated and should not be used for new code.
-// See https://github.com/kubernetes-sigs/cluster-api/issues/3370 for more information.
-//
-// Users should switch their methods and functions to return a (ctrl.Result, error) pair,
-// instead of relying on this error. Controller runtime exposes a Result.IsZero() (from 0.5.9, and 0.6.2)
-// which can be used from callers to see if reconciliation should be stopped or continue.
-type RequeueAfterError struct {
-	RequeueAfter time.Duration
-}
-
-// Error implements the error interface
-func (e *RequeueAfterError) Error() string {
-	return fmt.Sprintf("requeue in %v", e.RequeueAfter)
-}
-
-// GetRequeueAfter gets the duration to wait until the managed object is
-// requeued for further processing.
-func (e *RequeueAfterError) GetRequeueAfter() time.Duration {
-	return e.RequeueAfter
-}
-
-// IsRequeueAfter returns true if the error satisfies the interface HasRequeueAfterError.
-//
-// DEPRECATED: This error is deprecated and should not be used for new code.
-// See https://github.com/kubernetes-sigs/cluster-api/issues/3370 for more information.
-//
-// Users should switch their methods and functions to return a (ctrl.Result, error) pair,
-// instead of relying on this error. Controller runtime exposes a Result.IsZero() (from 0.5.9, and 0.6.2)
-// which can be used from callers to see if reconciliation should be stopped or continue.
-func IsRequeueAfter(err error) bool {
-	_, ok := errors.Cause(err).(HasRequeueAfterError)
-	return ok
-}
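
Note (not part of the diff): the doc comments on the deleted helpers describe the pattern this change applies throughout, i.e. return a (ctrl.Result, error) pair and let callers use Result.IsZero() to decide whether to cut reconciliation short. The following is only a minimal sketch of that shape; reconcileSomething, dependencyReady, and the 20-second delay are made-up names for illustration and do not appear in this change.

package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// reconcileSomething is a hypothetical sub-reconciler written in the style this
// diff converges on: no sentinel RequeueAfterError, just a (ctrl.Result, error) pair.
func reconcileSomething(ctx context.Context) (ctrl.Result, error) {
	// Instead of returning &capierrors.RequeueAfterError{RequeueAfter: ...},
	// encode the delay directly in the Result and return a nil error.
	if !dependencyReady() {
		return ctrl.Result{RequeueAfter: 20 * time.Second}, nil
	}
	return ctrl.Result{}, nil
}

// Reconcile shows the caller side: a non-zero Result or a non-nil error both
// mean "stop here and propagate whatever the sub-reconciler asked for".
func Reconcile(ctx context.Context) (ctrl.Result, error) {
	if result, err := reconcileSomething(ctx); !result.IsZero() || err != nil {
		return result, err
	}
	return ctrl.Result{}, nil
}

// dependencyReady is a stand-in for whatever condition the real code would check.
func dependencyReady() bool { return true }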