diff --git a/api/v1beta1/kustomization_types.go b/api/v1beta1/kustomization_types.go index f2c0c8ec..81942527 100644 --- a/api/v1beta1/kustomization_types.go +++ b/api/v1beta1/kustomization_types.go @@ -307,7 +307,19 @@ func (in Kustomization) GetDependsOn() (types.NamespacedName, []dependency.Cross }, in.Spec.DependsOn } +// GetConditions returns a slice of the conditions in the +// status of a kustomization +func (in *Kustomization) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the conditions of a kustomization +func (in *Kustomization) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + // GetStatusConditions returns a pointer to the Status.Conditions slice +// Deprecated: use GetConditions and SetConditions instead. func (in *Kustomization) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } diff --git a/controllers/kustomization_controller.go b/controllers/kustomization_controller.go index 3a05106e..2f35b093 100644 --- a/controllers/kustomization_controller.go +++ b/controllers/kustomization_controller.go @@ -20,6 +20,9 @@ import ( "context" "errors" "fmt" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/events" + "github.com/fluxcd/pkg/runtime/patch" "io/ioutil" "net/http" "net/url" @@ -30,21 +33,13 @@ import ( "time" securejoin "github.com/cyphar/filepath-securejoin" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" - "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/pkg/untar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" "github.com/go-logr/logr" "github.com/hashicorp/go-retryablehttp" apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" + 
kerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/cli-utils/pkg/kstatus/polling" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -56,6 +51,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "sigs.k8s.io/kustomize/api/filesys" + "github.com/fluxcd/pkg/apis/meta" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/predicates" + "github.com/fluxcd/pkg/untar" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta1" ) @@ -70,13 +71,11 @@ import ( // KustomizationReconciler reconciles a Kustomization object type KustomizationReconciler struct { client.Client - httpClient *retryablehttp.Client - requeueDependency time.Duration - Scheme *runtime.Scheme - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder - StatusPoller *polling.StatusPoller + httpClient *retryablehttp.Client + requeueDependency time.Duration + helper.Events + helper.Metrics + StatusPoller *polling.StatusPoller } type KustomizationReconcilerOptions struct { @@ -127,31 +126,17 @@ func (r *KustomizationReconciler) SetupWithManager(mgr ctrl.Manager, opts Kustom Complete(r) } -func (r *KustomizationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *KustomizationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { log := logr.FromContext(ctx) reconcileStart := time.Now() - var kustomization kustomizev1.Kustomization - if err := r.Get(ctx, req.NamespacedName, &kustomization); err != nil { + kustomization := &kustomizev1.Kustomization{} + if err := r.Get(ctx, req.NamespacedName, kustomization); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, kustomization) - - // Add our finalizer if it does not exist - if 
!controllerutil.ContainsFinalizer(&kustomization, kustomizev1.KustomizationFinalizer) { - controllerutil.AddFinalizer(&kustomization, kustomizev1.KustomizationFinalizer) - if err := r.Update(ctx, &kustomization); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !kustomization.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, kustomization) - } + defer r.RecordSuspend(ctx, kustomization, kustomization.Spec.Suspend) // Return early if the Kustomization is suspended. if kustomization.Spec.Suspend { @@ -159,247 +144,273 @@ func (r *KustomizationReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - // resolve source reference - source, err := r.getSource(ctx, kustomization) + // Initialize the patch helper + patchHelper, err := patch.NewHelper(kustomization, r.Client) if err != nil { - if apierrors.IsNotFound(err) { - msg := fmt.Sprintf("Source '%s' not found", kustomization.Spec.SourceRef.String()) - kustomization = kustomizev1.KustomizationNotReady(kustomization, "", kustomizev1.ArtifactFailedReason, msg) - if err := r.patchStatus(ctx, req, kustomization.Status); err != nil { - log.Error(err, "unable to update status for source not found") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, kustomization) - log.Info(msg) - // do not requeue immediately, when the source is created the watcher should trigger a reconciliation - return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, nil - } else { - // retry on transient errors - return ctrl.Result{Requeue: true}, err - } + return ctrl.Result{}, err } - if source.GetArtifact() == nil { - msg := "Source is not ready, artifact not found" - kustomization = kustomizev1.KustomizationNotReady(kustomization, "", kustomizev1.ArtifactFailedReason, msg) - if err := r.patchStatus(ctx, req, kustomization.Status); err != nil { - log.Error(err, 
"unable to update status for artifact not found") - return ctrl.Result{Requeue: true}, err + defer func() { + // record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(kustomization.GetAnnotations()); ok { + kustomization.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize Ready condition + conditions.SetSummary(kustomization, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.ArtifactAvailableCondition, + sourcev1.SourceAvailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by + // this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactAvailableCondition, + sourcev1.SourceAvailableCondition, + kustomizev1.HealthyCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Determine if the resource is still being reconciled, or if + // it has stalled, and record this observation + if retErr == nil { + + if result.IsZero() || !result.Requeue { + // We are no longer reconciling + conditions.Delete(kustomization, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(kustomization, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state + // is not ready, the reconciliation has stalled + conditions.MarkTrue(kustomization, meta.StalledCondition, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state + // is ready, the reconciliation is no longer stalled + conditions.Delete(kustomization, meta.StalledCondition) + } + } } - r.recordReadiness(ctx, kustomization) - log.Info(msg) - // do not requeue immediately, when the artifact is created the watcher should trigger a 
reconciliation - return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, nil - } - // check dependencies - if len(kustomization.Spec.DependsOn) > 0 { - if err := r.checkDependencies(kustomization); err != nil { - kustomization = kustomizev1.KustomizationNotReady( - kustomization, source.GetArtifact().Revision, meta.DependencyNotReadyReason, err.Error()) - if err := r.patchStatus(ctx, req, kustomization.Status); err != nil { - log.Error(err, "unable to update status for dependency not ready") - return ctrl.Result{Requeue: true}, err - } - // we can't rely on exponential backoff because it will prolong the execution too much, - // instead we requeue on a fix interval. - msg := fmt.Sprintf("Dependencies do not meet ready condition, retrying in %s", r.requeueDependency.String()) - log.Info(msg) - r.event(ctx, kustomization, source.GetArtifact().Revision, events.EventSeverityInfo, msg, nil) - r.recordReadiness(ctx, kustomization) - return ctrl.Result{RequeueAfter: r.requeueDependency}, nil + // Finally, patch the resource + if err := patchHelper.Patch(ctx, kustomization, patchOpts...); err != nil { + retErr = kerrors.NewAggregate([]error{retErr, err}) } - log.Info("All dependencies area ready, proceeding with reconciliation") - } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &kustomization) - if err != nil { + // Always record readiness and duration metrics + r.Metrics.RecordReadinessMetric(ctx, kustomization) + r.Metrics.RecordDuration(ctx, kustomization, reconcileStart) + }() + + // Add our finalizer if it does not exist + if !controllerutil.ContainsFinalizer(kustomization, kustomizev1.KustomizationFinalizer) { + controllerutil.AddFinalizer(kustomization, kustomizev1.KustomizationFinalizer) + if err := r.Update(ctx, kustomization); err != nil { + log.Error(err, "unable to register finalizer") return ctrl.Result{}, err } - defer r.MetricsRecorder.RecordDuration(*objRef, reconcileStart) 
} - // set the reconciliation status to progressing - kustomization = kustomizev1.KustomizationProgressing(kustomization) - if err := r.patchStatus(ctx, req, kustomization.Status); err != nil { - log.Error(err, "unable to update status to progressing") - return ctrl.Result{Requeue: true}, err + // Examine if the object is under deletion + if !kustomization.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, kustomization) } - r.recordReadiness(ctx, kustomization) - // reconcile kustomization by applying the latest revision - reconciledKustomization, reconcileErr := r.reconcile(ctx, *kustomization.DeepCopy(), source) - if err := r.patchStatus(ctx, req, reconciledKustomization.Status); err != nil { - log.Error(err, "unable to update status after reconciliation") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, reconciledKustomization) - - // broadcast the reconciliation failure and requeue at the specified retry interval - if reconcileErr != nil { - log.Error(reconcileErr, fmt.Sprintf("Reconciliation failed after %s, next try in %s", - time.Now().Sub(reconcileStart).String(), - kustomization.GetRetryInterval().String()), - "revision", - source.GetArtifact().Revision) - r.event(ctx, reconciledKustomization, source.GetArtifact().Revision, events.EventSeverityError, - reconcileErr.Error(), nil) - return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, nil - } - - // broadcast the reconciliation result and requeue at the specified interval - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Now().Sub(reconcileStart).String(), - kustomization.Spec.Interval.Duration.String()), - "revision", - source.GetArtifact().Revision, - ) - r.event(ctx, reconciledKustomization, source.GetArtifact().Revision, events.EventSeverityInfo, - "Update completed", map[string]string{"commit_status": "update"}) - return ctrl.Result{RequeueAfter: kustomization.Spec.Interval.Duration}, nil + return r.reconcile(ctx, 
kustomization, patchHelper) } func (r *KustomizationReconciler) reconcile( ctx context.Context, - kustomization kustomizev1.Kustomization, - source sourcev1.Source) (kustomizev1.Kustomization, error) { - // record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(kustomization.GetAnnotations()); ok { - kustomization.Status.SetLastHandledReconcileRequest(v) + kustomization *kustomizev1.Kustomization, patchHelper *patch.Helper) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkTrue(kustomization, meta.ReconcilingCondition, "Reconciling", "") + + var artifact sourcev1.Artifact + if result, err := r.reconcileSource(ctx, kustomization, &artifact); err != nil || conditions.IsFalse(kustomization, sourcev1.SourceAvailableCondition) { + return result, err + } + kustomization.Status.LastAttemptedRevision = artifact.Revision + + if result, err := r.reconcileDependencies(ctx, kustomization, artifact.Revision); err != nil { + return result, err } // create tmp dir tmpDir, err := ioutil.TempDir("", kustomization.Name) if err != nil { err = fmt.Errorf("tmp dir error: %w", err) - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - sourcev1.StorageOperationFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, sourcev1.ArtifactAvailableCondition, sourcev1.StorageOperationFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } defer os.RemoveAll(tmpDir) // download artifact and extract files - err = r.download(source.GetArtifact().URL, tmpDir) + err = r.download(artifact.URL, tmpDir) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.ArtifactFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, sourcev1.ArtifactAvailableCondition, kustomizev1.ArtifactFailedReason, err.Error()) + return 
ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } // check build path exists dirPath, err := securejoin.SecureJoin(tmpDir, kustomization.Spec.Path) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.ArtifactFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, kustomizev1.ArtifactFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } if _, err := os.Stat(dirPath); err != nil { err = fmt.Errorf("kustomization path not found: %w", err) - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.ArtifactFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, kustomizev1.ArtifactFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } // create any necessary kube-clients for impersonation impersonation := NewKustomizeImpersonation(kustomization, r.Client, r.StatusPoller, dirPath) kubeClient, statusPoller, err := impersonation.GetClient(ctx) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - meta.ReconciliationFailedReason, - err.Error(), - ), fmt.Errorf("failed to build kube client: %w", err) + conditions.MarkFalse(kustomization, meta.ReadyCondition, meta.ReconciliationFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, fmt.Errorf("failed to build kube client: %w", err) } // generate kustomization.yaml and calculate the manifests checksum checksum, err := r.generate(ctx, kubeClient, kustomization, dirPath) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.BuildFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, 
kustomizev1.BuildFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } // build the kustomization and generate the GC snapshot snapshot, err := r.build(ctx, kustomization, checksum, dirPath) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.BuildFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, kustomizev1.BuildFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } // dry-run apply err = r.validate(ctx, kustomization, impersonation, dirPath) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.ValidationFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, kustomizev1.ValidationFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err + } + + // patch kustomization before apply because it takes some time + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactAvailableCondition, + sourcev1.SourceAvailableCondition, + kustomizev1.HealthyCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + if err := patchHelper.Patch(ctx, kustomization, patchOpts...); err != nil { + return ctrl.Result{Requeue: true}, err } // apply - changeSet, err := r.applyWithRetry(ctx, kustomization, impersonation, source.GetArtifact().Revision, dirPath, 5*time.Second) + changeSet, err := r.applyWithRetry(ctx, *kustomization, impersonation, artifact.Revision, dirPath, 5*time.Second) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - meta.ReconciliationFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, 
meta.ReconciliationFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } // prune err = r.prune(ctx, kubeClient, kustomization, checksum) if err != nil { - return kustomizev1.KustomizationNotReady( - kustomization, - source.GetArtifact().Revision, - kustomizev1.PruneFailedReason, - err.Error(), - ), err + conditions.MarkFalse(kustomization, meta.ReadyCondition, kustomizev1.PruneFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err } + kustomization.Status.Snapshot = snapshot // health assessment - err = r.checkHealth(ctx, statusPoller, kustomization, source.GetArtifact().Revision, changeSet != "") + conditions.MarkUnknown(kustomization, kustomizev1.HealthyCondition, "HealthCheckInProgress", "") + conditions.MarkUnknown(kustomization, meta.ReadyCondition, "HealthCheckInProgress", "") + if err := patchHelper.Patch(ctx, kustomization, patchOpts...); err != nil { + logr.FromContext(ctx).Error(err, "unable to patch status for health check in progress") + return ctrl.Result{Requeue: true}, err + } + err = r.checkHealth(ctx, statusPoller, kustomization, artifact.Revision, changeSet != "") if err != nil { - return kustomizev1.KustomizationNotReadySnapshot( - kustomization, - snapshot, - source.GetArtifact().Revision, - kustomizev1.HealthCheckFailedReason, - err.Error(), - ), err - } - - return kustomizev1.KustomizationReady( - kustomization, - snapshot, - source.GetArtifact().Revision, - meta.ReconciliationSucceededReason, - "Applied revision: "+source.GetArtifact().Revision, - ), nil + conditions.MarkFalse(kustomization, meta.ReadyCondition, kustomizev1.HealthCheckFailedReason, err.Error()) + conditions.MarkFalse(kustomization, kustomizev1.HealthyCondition, kustomizev1.HealthCheckFailedReason, err.Error()) + return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, err + } + + kustomization.Status.LastAppliedRevision = artifact.Revision + msg := "Applied revision: " + 
artifact.Revision + conditions.MarkTrue(kustomization, meta.ReadyCondition, meta.ReconciliationSucceededReason, msg) + conditions.MarkTrue(kustomization, kustomizev1.HealthyCondition, meta.ReconciliationSucceededReason, msg) + return ctrl.Result{RequeueAfter: kustomization.Spec.Interval.Duration}, nil +} + +func (r *KustomizationReconciler) reconcileSource(ctx context.Context, kustomization *kustomizev1.Kustomization, artifact *sourcev1.Artifact) (ctrl.Result, error) { + // resolve source reference + sourceObj, err := r.getSource(ctx, *kustomization) + if err != nil { + switch { + case apierrors.IsNotFound(err): + msg := fmt.Sprintf("Source '%s' not found", kustomization.Spec.SourceRef.String()) + conditions.MarkFalse(kustomization, sourcev1.SourceAvailableCondition, kustomizev1.ArtifactFailedReason, msg) + logr.FromContext(ctx).Info(msg) + return ctrl.Result{RequeueAfter: kustomization.Spec.Interval.Duration}, nil + default: + return ctrl.Result{}, err + } + } + + // Mirror source readiness + conditions.SetMirror(kustomization, sourcev1.SourceRefReadyCondition, sourceObj) + + // Confirm source has an artifact + if sourceObj.GetArtifact() == nil { + conditions.MarkFalse(kustomization, + sourcev1.SourceAvailableCondition, + "NoArtifact", + "No artifact available for %s %q", + kustomization.Spec.SourceRef.Kind, + kustomization.Spec.SourceRef.Name) + // The watcher should notice an artifact change + return ctrl.Result{}, nil + } + + *artifact = *sourceObj.GetArtifact() + return ctrl.Result{}, nil +} + +func (r *KustomizationReconciler) reconcileDependencies(ctx context.Context, kustomization *kustomizev1.Kustomization, revision string) (ctrl.Result, error) { + // check dependencies + if len(kustomization.Spec.DependsOn) > 0 { + if err := r.checkDependencies(*kustomization); err != nil { + conditions.MarkFalse(kustomization, meta.ReadyCondition, meta.DependencyNotReadyReason, err.Error()) + // we can't rely on exponential backoff because it will prolong the execution 
too much, + // instead we requeue on a fixed interval. + msg := fmt.Sprintf("Dependencies do not meet ready condition, retrying in %s", r.requeueDependency.String()) + logr.FromContext(ctx).Info(msg) + r.Events.Event(ctx, kustomization, map[string]string{ + "revision": revision, + }, events.EventSeverityInfo, "DependencyNotReady", msg) + return ctrl.Result{RequeueAfter: r.requeueDependency}, nil + } + + logr.FromContext(ctx).Info("All dependencies are ready, proceeding with reconciliation") + } + + return ctrl.Result{}, nil } func (r *KustomizationReconciler) checkDependencies(kustomization kustomizev1.Kustomization) error { @@ -498,12 +509,12 @@ func (r *KustomizationReconciler) getSource(ctx context.Context, kustomization k return source, nil } -func (r *KustomizationReconciler) generate(ctx context.Context, kubeClient client.Client, kustomization kustomizev1.Kustomization, dirPath string) (string, error) { +func (r *KustomizationReconciler) generate(ctx context.Context, kubeClient client.Client, kustomization *kustomizev1.Kustomization, dirPath string) (string, error) { gen := NewGenerator(kustomization, kubeClient) return gen.WriteFile(ctx, dirPath) } -func (r *KustomizationReconciler) build(ctx context.Context, kustomization kustomizev1.Kustomization, checksum, dirPath string) (*kustomizev1.Snapshot, error) { +func (r *KustomizationReconciler) build(ctx context.Context, kustomization *kustomizev1.Kustomization, checksum, dirPath string) (*kustomizev1.Snapshot, error) { timeout := kustomization.GetTimeout() ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -570,7 +581,7 @@ func (r *KustomizationReconciler) build(ctx context.Context, kustomization kusto return kustomizev1.NewSnapshot(resources, checksum) } -func (r *KustomizationReconciler) validate(ctx context.Context, 
kustomization *kustomizev1.Kustomization, imp *KustomizeImpersonation, dirPath string) error { if kustomization.Spec.Validation == "" || kustomization.Spec.Validation == "none" { return nil } @@ -695,7 +706,9 @@ func (r *KustomizationReconciler) applyWithRetry(ctx context.Context, kustomizat return "", err } else { if changeSet != "" { - r.event(ctx, kustomization, revision, events.EventSeverityInfo, changeSet, nil) + r.Events.Event(ctx, &kustomization, map[string]string{ + "revision": revision, + }, events.EventSeverityInfo, "KustomizationApplied", changeSet) } } } else { @@ -703,13 +716,15 @@ func (r *KustomizationReconciler) applyWithRetry(ctx context.Context, kustomizat } } else { if changeSet != "" && kustomization.Status.LastAppliedRevision != revision { - r.event(ctx, kustomization, revision, events.EventSeverityInfo, changeSet, nil) + r.Events.Event(ctx, &kustomization, map[string]string{ + "revision": revision, + }, events.EventSeverityInfo, "KustomizationApplied", changeSet) } } return changeSet, nil } -func (r *KustomizationReconciler) prune(ctx context.Context, kubeClient client.Client, kustomization kustomizev1.Kustomization, newChecksum string) error { +func (r *KustomizationReconciler) prune(ctx context.Context, kubeClient client.Client, kustomization *kustomizev1.Kustomization, newChecksum string) error { if !kustomization.Spec.Prune || kustomization.Status.Snapshot == nil { return nil } @@ -728,13 +743,15 @@ func (r *KustomizationReconciler) prune(ctx context.Context, kubeClient client.C } else { if output != "" { log.Info(fmt.Sprintf("garbage collection completed: %s", output)) - r.event(ctx, kustomization, newChecksum, events.EventSeverityInfo, output, nil) + r.Events.Event(ctx, kustomization, map[string]string{ + "revision": newChecksum, + }, events.EventSeverityInfo, "GarbageCollectionCompleted", output) } } return nil } -func (r *KustomizationReconciler) checkHealth(ctx context.Context, statusPoller *polling.StatusPoller, kustomization 
kustomizev1.Kustomization, revision string, changed bool) error { +func (r *KustomizationReconciler) checkHealth(ctx context.Context, statusPoller *polling.StatusPoller, kustomization *kustomizev1.Kustomization, revision string, changed bool) error { if len(kustomization.Spec.HealthChecks) == 0 { return nil } @@ -749,12 +766,15 @@ func (r *KustomizationReconciler) checkHealth(ctx context.Context, statusPoller healthy := healthiness != nil && healthiness.Status == metav1.ConditionTrue if !healthy || (kustomization.Status.LastAppliedRevision != revision && changed) { - r.event(ctx, kustomization, revision, events.EventSeverityInfo, "Health check passed", nil) + r.Events.Event(ctx, kustomization, map[string]string{ + "revision": revision, + }, events.EventSeverityInfo, "HealthCheckPassed", "Health check passed") } + return nil } -func (r *KustomizationReconciler) reconcileDelete(ctx context.Context, kustomization kustomizev1.Kustomization) (ctrl.Result, error) { +func (r *KustomizationReconciler) reconcileDelete(ctx context.Context, kustomization *kustomizev1.Kustomization) (ctrl.Result, error) { log := logr.FromContext(ctx) if kustomization.Spec.Prune && !kustomization.Spec.Suspend { // create any necessary kube-clients @@ -766,94 +786,21 @@ func (r *KustomizationReconciler) reconcileDelete(ctx context.Context, kustomiza return ctrl.Result{}, err } if err := r.prune(ctx, client, kustomization, ""); err != nil { - r.event(ctx, kustomization, kustomization.Status.LastAppliedRevision, events.EventSeverityError, "pruning for deleted resource failed", nil) + r.Events.Event(ctx, kustomization, map[string]string{ + "revision": kustomization.Status.LastAppliedRevision, + }, events.EventSeverityError, "GarbageCollectionFailed", "pruning for deleted resource failed") // Return the error so we retry the failed garbage collection return ctrl.Result{}, err } } - // Record deleted status - r.recordReadiness(ctx, kustomization) - // Remove our finalizer from the list and update it 
- controllerutil.RemoveFinalizer(&kustomization, kustomizev1.KustomizationFinalizer) - if err := r.Update(ctx, &kustomization); err != nil { - return ctrl.Result{}, err - } + controllerutil.RemoveFinalizer(kustomization, kustomizev1.KustomizationFinalizer) // Stop reconciliation as the object is being deleted return ctrl.Result{}, nil } -func (r *KustomizationReconciler) event(ctx context.Context, kustomization kustomizev1.Kustomization, revision, severity, msg string, metadata map[string]string) { - log := logr.FromContext(ctx) - r.EventRecorder.Event(&kustomization, "Normal", severity, msg) - objRef, err := reference.GetReference(r.Scheme, &kustomization) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if r.ExternalEventRecorder != nil { - if metadata == nil { - metadata = map[string]string{} - } - if revision != "" { - metadata["revision"] = revision - } - - reason := severity - if c := apimeta.FindStatusCondition(kustomization.Status.Conditions, meta.ReadyCondition); c != nil { - reason = c.Reason - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, metadata, severity, reason, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *KustomizationReconciler) recordReadiness(ctx context.Context, kustomization kustomizev1.Kustomization) { - if r.MetricsRecorder == nil { - return - } - log := logr.FromContext(ctx) - - objRef, err := reference.GetReference(r.Scheme, &kustomization) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(kustomization.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !kustomization.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !kustomization.DeletionTimestamp.IsZero()) - } -} - -func (r *KustomizationReconciler) 
recordSuspension(ctx context.Context, kustomization kustomizev1.Kustomization) { - if r.MetricsRecorder == nil { - return - } - log := logr.FromContext(ctx) - - objRef, err := reference.GetReference(r.Scheme, &kustomization) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !kustomization.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, kustomization.Spec.Suspend) - } -} - func (r *KustomizationReconciler) patchStatus(ctx context.Context, req ctrl.Request, newStatus kustomizev1.KustomizationStatus) error { var kustomization kustomizev1.Kustomization if err := r.Get(ctx, req.NamespacedName, &kustomization); err != nil { diff --git a/controllers/kustomization_decryptor.go b/controllers/kustomization_decryptor.go index 96ef07ba..54681633 100644 --- a/controllers/kustomization_decryptor.go +++ b/controllers/kustomization_decryptor.go @@ -61,13 +61,13 @@ func NewDecryptor(kubeClient client.Client, } func NewTempDecryptor(kubeClient client.Client, - kustomization kustomizev1.Kustomization) (*KustomizeDecryptor, func(), error) { + kustomization *kustomizev1.Kustomization) (*KustomizeDecryptor, func(), error) { tmpDir, err := ioutil.TempDir("", fmt.Sprintf("decryptor-%s-", kustomization.Name)) if err != nil { return nil, nil, fmt.Errorf("tmp dir error: %w", err) } cleanup := func() { os.RemoveAll(tmpDir) } - return NewDecryptor(kubeClient, kustomization, tmpDir), cleanup, nil + return NewDecryptor(kubeClient, *kustomization, tmpDir), cleanup, nil } func (kd *KustomizeDecryptor) Decrypt(res *resource.Resource) (*resource.Resource, error) { diff --git a/controllers/kustomization_generator.go b/controllers/kustomization_generator.go index 8ec0e076..e33fd539 100644 --- a/controllers/kustomization_generator.go +++ b/controllers/kustomization_generator.go @@ -45,11 +45,11 @@ const ( ) type KustomizeGenerator struct { - kustomization 
kustomizev1.Kustomization + kustomization *kustomizev1.Kustomization client.Client } -func NewGenerator(kustomization kustomizev1.Kustomization, kubeClient client.Client) *KustomizeGenerator { +func NewGenerator(kustomization *kustomizev1.Kustomization, kubeClient client.Client) *KustomizeGenerator { return &KustomizeGenerator{ kustomization: kustomization, Client: kubeClient, diff --git a/controllers/kustomization_healthcheck.go b/controllers/kustomization_healthcheck.go index e6d3f13c..7f734a2d 100644 --- a/controllers/kustomization_healthcheck.go +++ b/controllers/kustomization_healthcheck.go @@ -35,11 +35,11 @@ import ( ) type KustomizeHealthCheck struct { - kustomization kustomizev1.Kustomization + kustomization *kustomizev1.Kustomization statusPoller *polling.StatusPoller } -func NewHealthCheck(kustomization kustomizev1.Kustomization, statusPoller *polling.StatusPoller) *KustomizeHealthCheck { +func NewHealthCheck(kustomization *kustomizev1.Kustomization, statusPoller *polling.StatusPoller) *KustomizeHealthCheck { return &KustomizeHealthCheck{ kustomization: kustomization, statusPoller: statusPoller, diff --git a/controllers/kustomization_impersonation.go b/controllers/kustomization_impersonation.go index c013951b..e8abf84c 100644 --- a/controllers/kustomization_impersonation.go +++ b/controllers/kustomization_impersonation.go @@ -35,13 +35,13 @@ import ( type KustomizeImpersonation struct { workdir string - kustomization kustomizev1.Kustomization + kustomization *kustomizev1.Kustomization statusPoller *polling.StatusPoller client.Client } func NewKustomizeImpersonation( - kustomization kustomizev1.Kustomization, + kustomization *kustomizev1.Kustomization, kubeClient client.Client, statusPoller *polling.StatusPoller, workdir string) *KustomizeImpersonation { diff --git a/controllers/kustomization_varsub.go b/controllers/kustomization_varsub.go index 67b3e840..0bb76f2f 100644 --- a/controllers/kustomization_varsub.go +++ b/controllers/kustomization_varsub.go 
@@ -26,7 +26,7 @@ const varsubRegex = "^[_[:alpha:]][_[:alpha:][:digit:]]*$" func substituteVariables( ctx context.Context, kubeClient client.Client, - kustomization kustomizev1.Kustomization, + kustomization *kustomizev1.Kustomization, res *resource.Resource) (*resource.Resource, error) { resData, err := res.AsYAML() if err != nil { diff --git a/controllers/suite_test.go b/controllers/suite_test.go index ada51190..03806244 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,6 +17,7 @@ limitations under the License. package controllers import ( + "github.com/fluxcd/pkg/runtime/controller" "math/rand" "os" "path/filepath" @@ -90,11 +91,13 @@ var _ = BeforeSuite(func(done Done) { }) Expect(err).ToNot(HaveOccurred()) + metrics := controller.MustMakeMetrics(k8sManager) + events := controller.MakeEvents(k8sManager, "kustomize-controller", nil) + err = (&KustomizationReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - EventRecorder: k8sManager.GetEventRecorderFor("kustomize-controller"), - ExternalEventRecorder: nil, + Client: k8sManager.GetClient(), + Events: events, + Metrics: metrics, }).SetupWithManager(k8sManager, KustomizationReconcilerOptions{MaxConcurrentReconciles: 1}) Expect(err).ToNot(HaveOccurred(), "failed to setup KustomizationReconciler") diff --git a/go.mod b/go.mod index 79ba37dd..f3995649 100644 --- a/go.mod +++ b/go.mod @@ -12,10 +12,10 @@ require ( github.com/fluxcd/kustomize-controller/api v0.12.0 github.com/fluxcd/pkg/apis/kustomize v0.0.1 github.com/fluxcd/pkg/apis/meta v0.9.0 - github.com/fluxcd/pkg/runtime v0.11.0 + github.com/fluxcd/pkg/runtime v0.11.1-0.20210514212714-849f4a7f244f github.com/fluxcd/pkg/testserver v0.0.2 github.com/fluxcd/pkg/untar v0.0.5 - github.com/fluxcd/source-controller/api v0.12.1 + github.com/fluxcd/source-controller/api v0.13.1-0.20210603122717-b629c741bdca github.com/go-logr/logr v0.3.0 github.com/hashicorp/go-retryablehttp v0.6.8 github.com/howeyc/gopass 
v0.0.0-20170109162249-bf9dde6d0d2c diff --git a/go.sum b/go.sum index c2a8ec82..ff13f162 100644 --- a/go.sum +++ b/go.sum @@ -198,14 +198,17 @@ github.com/fluxcd/pkg/apis/kustomize v0.0.1 h1:TkA80R0GopRY27VJqzKyS6ifiKIAfwBd7 github.com/fluxcd/pkg/apis/kustomize v0.0.1/go.mod h1:JAFPfnRmcrAoG1gNiA8kmEXsnOBuDyZ/F5X4DAQcVV0= github.com/fluxcd/pkg/apis/meta v0.9.0 h1:rxW69p+VmJCKXXkaRYnovRBFlKjd+MJQfm2RrB0B4j8= github.com/fluxcd/pkg/apis/meta v0.9.0/go.mod h1:yHuY8kyGHYz22I0jQzqMMGCcHViuzC/WPdo9Gisk8Po= -github.com/fluxcd/pkg/runtime v0.11.0 h1:FPsiu1k5NQGl2tsaXH5WgSmrOMg7o44jdOP0rW/TI1Y= github.com/fluxcd/pkg/runtime v0.11.0/go.mod h1:ZjAwug6DBLXwo9UdP1/tTPyuWpK9kZ0BEJbctbuEB1o= +github.com/fluxcd/pkg/runtime v0.11.1-0.20210514212714-849f4a7f244f h1:LZIsKBl9Px7y/RMl6zU74t11+NTYdbtxzt1JkI8tY50= +github.com/fluxcd/pkg/runtime v0.11.1-0.20210514212714-849f4a7f244f/go.mod h1:vKV1eL4j9TjM6hhWopKeEBmrqXR8SJys0ir3D1GPtKE= github.com/fluxcd/pkg/testserver v0.0.2 h1:SoaMtO9cE5p/wl2zkGudzflnEHd9mk68CGjZOo7w0Uk= github.com/fluxcd/pkg/testserver v0.0.2/go.mod h1:pgUZTh9aQ44FSTQo+5NFlh7YMbUfdz1B80DalW7k96Y= github.com/fluxcd/pkg/untar v0.0.5 h1:UGI3Ch1UIEIaqQvMicmImL1s9npQa64DJ/ozqHKB7gk= github.com/fluxcd/pkg/untar v0.0.5/go.mod h1:O6V9+rtl8c1mHBafgqFlJN6zkF1HS5SSYn7RpQJ/nfw= github.com/fluxcd/source-controller/api v0.12.1 h1:ubO3gwGaxnXwayJeDHpdsh96NXwOLpFcbLjZo/pqWCg= github.com/fluxcd/source-controller/api v0.12.1/go.mod h1:+EPyhxC7Y+hUnq7EwAkkLtfbwCxJxF5yfmiyzDk43KY= +github.com/fluxcd/source-controller/api v0.13.1-0.20210603122717-b629c741bdca h1:u2DB4/cvLFAdrbB/WQSpLhLK1DtzcD6Rrtbg1wbU0vU= +github.com/fluxcd/source-controller/api v0.13.1-0.20210603122717-b629c741bdca/go.mod h1:+EPyhxC7Y+hUnq7EwAkkLtfbwCxJxF5yfmiyzDk43KY= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= diff --git a/main.go b/main.go index f89c43c1..3ef3c5da 100644 --- a/main.go +++ b/main.go @@ -18,25 +18,23 @@ package main import ( "fmt" + helper "github.com/fluxcd/pkg/runtime/controller" "os" "time" - flag "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "sigs.k8s.io/cli-utils/pkg/kstatus/polling" - ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" - "github.com/fluxcd/pkg/runtime/client" "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + flag "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/cli-utils/pkg/kstatus/polling" + ctrl "sigs.k8s.io/controller-runtime" kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta1" "github.com/fluxcd/kustomize-controller/controllers" @@ -89,17 +87,14 @@ func main() { var eventRecorder *events.Recorder if eventsAddr != "" { - if er, err := events.NewRecorder(eventsAddr, controllerName); err != nil { + er, err := events.NewRecorder(eventsAddr, controllerName) + if err != nil { setupLog.Error(err, "unable to create event recorder") os.Exit(1) - } else { - eventRecorder = er } + eventRecorder = er } - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) 
- watchNamespace := "" if !watchAllNamespaces { watchNamespace = os.Getenv("RUNTIME_NAMESPACE") @@ -128,13 +123,14 @@ func main() { probes.SetupChecks(mgr, setupLog) pprof.SetupHandlers(mgr, setupLog) + mgrEvents := helper.MakeEvents(mgr, controllerName, eventRecorder) + mgrMetrics := helper.MustMakeMetrics(mgr) + if err = (&controllers.KustomizationReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, - StatusPoller: polling.NewStatusPoller(mgr.GetClient(), mgr.GetRESTMapper()), + Client: mgr.GetClient(), + Events: mgrEvents, + Metrics: mgrMetrics, + StatusPoller: polling.NewStatusPoller(mgr.GetClient(), mgr.GetRESTMapper()), }).SetupWithManager(mgr, controllers.KustomizationReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency,