feat(layer): align status with kept terraformruns
Alan-pad committed May 20, 2024
1 parent 3d945c2 commit e5a549c
Showing 12 changed files with 67 additions and 196 deletions.
9 changes: 3 additions & 6 deletions api/v1alpha1/common.go
@@ -6,8 +6,7 @@ import (
 )
 
 const (
-	PlanRunRetention int = 6
-	ApplyRunRetention int = 6
+	DefaultRunRetention int = 10
 )
 
 type OverrideRunnerSpec struct {
@@ -31,8 +30,7 @@ type MetadataOverride struct {
 }
 
 type RunHistoryPolicy struct {
-	KeepLastPlanRuns *int `json:"plan,omitempty"`
-	KeepLastApplyRuns *int `json:"apply,omitempty"`
+	KeepLastRuns *int `json:"runs,omitempty"`
 }
 
 type RemediationStrategy struct {
@@ -89,8 +87,7 @@ func GetOverrideRunnerSpec(repository *TerraformRepository, layer *TerraformLaye
 
 func GetRunHistoryPolicy(repository *TerraformRepository, layer *TerraformLayer) RunHistoryPolicy {
 	return RunHistoryPolicy{
-		KeepLastPlanRuns: chooseInt(repository.Spec.RunHistoryPolicy.KeepLastPlanRuns, layer.Spec.RunHistoryPolicy.KeepLastPlanRuns, PlanRunRetention),
-		KeepLastApplyRuns: chooseInt(repository.Spec.RunHistoryPolicy.KeepLastApplyRuns, layer.Spec.RunHistoryPolicy.KeepLastApplyRuns, ApplyRunRetention),
+		KeepLastRuns: chooseInt(repository.Spec.RunHistoryPolicy.KeepLastRuns, layer.Spec.RunHistoryPolicy.KeepLastRuns, 10),
 	}
 }
 
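The resolution order for the new `KeepLastRuns` field, as exercised by the updated tests below, is layer over repository over the built-in default of 10. `chooseInt` is defined elsewhere in the repository and is not part of this diff, so the snippet below re-implements it only to illustrate that assumed precedence:

```go
package main

import "fmt"

// chooseInt is reimplemented here purely for illustration; the real helper
// lives elsewhere in the burrito codebase and is not shown in this commit.
// Assumed precedence: layer setting > repository setting > built-in default.
func chooseInt(repoValue, layerValue *int, defaultValue int) *int {
	if layerValue != nil {
		return layerValue
	}
	if repoValue != nil {
		return repoValue
	}
	return &defaultValue
}

func main() {
	repo, layer := 10, 5
	fmt.Println(*chooseInt(&repo, &layer, 10)) // 5: the layer overrides the repository
	fmt.Println(*chooseInt(&repo, nil, 10))    // 10: the repository value applies
	fmt.Println(*chooseInt(nil, nil, 10))      // 10: built-in default
}
```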
28 changes: 9 additions & 19 deletions api/v1alpha1/common_test.go
@@ -1581,15 +1581,13 @@ func TestGetHistoryPolicy(t *testing.T) {
 			&configv1alpha1.TerraformRepository{
 				Spec: configv1alpha1.TerraformRepositorySpec{
 					RunHistoryPolicy: configv1alpha1.RunHistoryPolicy{
-						KeepLastPlanRuns: intPointer(10),
-						KeepLastApplyRuns: intPointer(10),
+						KeepLastRuns: intPointer(10),
 					},
 				},
 			},
 			&configv1alpha1.TerraformLayer{},
 			configv1alpha1.RunHistoryPolicy{
-				KeepLastPlanRuns: intPointer(10),
-				KeepLastApplyRuns: intPointer(10),
+				KeepLastRuns: intPointer(10),
 			},
 		},
 		{
@@ -1598,49 +1596,41 @@ func TestGetHistoryPolicy(t *testing.T) {
 			&configv1alpha1.TerraformLayer{
 				Spec: configv1alpha1.TerraformLayerSpec{
 					RunHistoryPolicy: configv1alpha1.RunHistoryPolicy{
-						KeepLastPlanRuns: intPointer(10),
-						KeepLastApplyRuns: intPointer(10),
+						KeepLastRuns: intPointer(10),
 					},
 				},
 			},
 			configv1alpha1.RunHistoryPolicy{
-				KeepLastPlanRuns: intPointer(10),
-				KeepLastApplyRuns: intPointer(10),
+				KeepLastRuns: intPointer(10),
 			},
 		},
 		{
 			"OverrideRepositoryWithLayer",
 			&configv1alpha1.TerraformRepository{
 				Spec: configv1alpha1.TerraformRepositorySpec{
 					RunHistoryPolicy: configv1alpha1.RunHistoryPolicy{
-						KeepLastPlanRuns: intPointer(10),
-						KeepLastApplyRuns: intPointer(10),
+						KeepLastRuns: intPointer(10),
 					},
 				},
 			},
 			&configv1alpha1.TerraformLayer{
 				Spec: configv1alpha1.TerraformLayerSpec{
 					RunHistoryPolicy: configv1alpha1.RunHistoryPolicy{
-						KeepLastPlanRuns: intPointer(6),
-						KeepLastApplyRuns: intPointer(5),
+						KeepLastRuns: intPointer(5),
 					},
 				},
 			},
 			configv1alpha1.RunHistoryPolicy{
-				KeepLastPlanRuns: intPointer(6),
-				KeepLastApplyRuns: intPointer(5),
+				KeepLastRuns: intPointer(5),
 			},
 		},
 	}
 
 	for _, tc := range tt {
 		t.Run(tc.name, func(t *testing.T) {
 			result := configv1alpha1.GetRunHistoryPolicy(tc.repository, tc.layer)
-			if *tc.expectedHistoryPolicy.KeepLastPlanRuns != *result.KeepLastPlanRuns {
-				t.Errorf("different plan policy computed: expected %d got %d", *tc.expectedHistoryPolicy.KeepLastPlanRuns, *result.KeepLastPlanRuns)
-			}
-			if *tc.expectedHistoryPolicy.KeepLastApplyRuns != *result.KeepLastApplyRuns {
-				t.Errorf("different apply policy computed: expected %d got %d", *tc.expectedHistoryPolicy.KeepLastApplyRuns, *result.KeepLastApplyRuns)
+			if *tc.expectedHistoryPolicy.KeepLastRuns != *result.KeepLastRuns {
+				t.Errorf("different policy computed: expected %d got %d", *tc.expectedHistoryPolicy.KeepLastRuns, *result.KeepLastRuns)
 			}
 		})
 	}
9 changes: 2 additions & 7 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions cmd/controllers/start.go
@@ -23,9 +23,9 @@ func buildControllersStartCmd(app *burrito.App) *cobra.Command {
 		},
 	}
 
-	defaultDriftDetectionTimer, _ := time.ParseDuration("1h")
-	defaultOnErrorTimer, _ := time.ParseDuration("30s")
-	defaultWaitActionTimer, _ := time.ParseDuration("15s")
+	defaultDriftDetectionTimer, _ := time.ParseDuration("4h")
+	defaultOnErrorTimer, _ := time.ParseDuration("10s")
+	defaultWaitActionTimer, _ := time.ParseDuration("5s")
 	defaultFailureGracePeriod, _ := time.ParseDuration("15s")
 
 	cmd.Flags().StringSliceVar(&app.Config.Controller.Namespaces, "namespaces", []string{"burrito-system"}, "list of namespaces to watch")
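The flag definitions that consume these defaults are collapsed behind the context above. The sketch below shows the usual cobra/pflag pattern for wiring such duration defaults; the flag names and the `Timers` stand-in struct are assumptions for illustration, not necessarily burrito's actual code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

// Timers is a stand-in for burrito's controller timer configuration;
// the real struct lives in internal/burrito/config and is not shown here.
type Timers struct {
	DriftDetection time.Duration
	OnError        time.Duration
	WaitAction     time.Duration
}

func main() {
	timers := Timers{}
	cmd := &cobra.Command{
		Use: "controllers",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Printf("drift detection every %s\n", timers.DriftDetection)
			return nil
		},
	}
	// Flag names below are illustrative only.
	cmd.Flags().DurationVar(&timers.DriftDetection, "drift-detection", 4*time.Hour, "period between drift detection runs")
	cmd.Flags().DurationVar(&timers.OnError, "on-error", 10*time.Second, "requeue delay after a reconciliation error")
	cmd.Flags().DurationVar(&timers.WaitAction, "wait-action", 5*time.Second, "requeue delay while waiting on a run")
	_ = cmd.Execute()
}
```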
33 changes: 3 additions & 30 deletions deploy/charts/burrito/values.yaml
@@ -15,14 +15,10 @@ config:
   burrito:
     # Burrito controller configuration
     controller:
-      # -- By default, the controller will watch all namespaces
+      # -- By default, the controller will only watch the tenants namespaces
       namespaces: []
-      timers:
-        driftDetection: 1h
-        onError: 30s
-        waitAction: 15s
-        failureGracePeriod: 15s
-      terraformMaxRetries: 5
+      timers: {}
+      terraformMaxRetries: 3
       types: ["layer", "repository", "run", "pullrequest"]
       leaderElection:
         enabled: true
@@ -70,29 +66,6 @@ config:
     runner:
       sshKnownHostsConfigMapName: burrito-ssh-known-hosts
 
-# redis:
-#   enabled: true
-#   metadata:
-#     labels:
-#       app.kubernetes.io/component: redis
-#       app.kubernetes.io/name: burrito-redis
-#   deployment:
-#     image:
-#       repository: redis
-#       tag: "7.2.4-alpine"
-#       pullPolicy: Always
-#     args: []
-#     podSecurityContext:
-#       runAsNonRoot: true
-#       runAsUser: 999
-#       seccompProfile:
-#         type: RuntimeDefault
-#   service:
-#     ports:
-#       - name: tcp-redis
-#         port: 6379
-#         targetPort: 6379
-
 hermitcrab:
   metadata:
     labels:
1 change: 1 addition & 0 deletions internal/burrito/config/config.go
@@ -70,6 +70,7 @@ type ControllerConfig struct {
 	KubernetesWebhookPort int `mapstructure:"kubernetesWebhookPort"`
 	GithubConfig GithubConfig `mapstructure:"githubConfig"`
 	GitlabConfig GitlabConfig `mapstructure:"gitlabConfig"`
+	RunParallelism int `mapstructure:"runParallelism"`
 }
 
 type GithubConfig struct {
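The new `RunParallelism` field is decoded from a `runParallelism` key through its mapstructure tag. A minimal sketch of that decoding, using a trimmed-down stand-in for `ControllerConfig` (the real struct carries many more fields and is typically populated by the app's config loader rather than a direct `Decode` call):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// controllerConfig is a trimmed stand-in for burrito's ControllerConfig,
// kept here only to show how the mapstructure tag maps the configuration key.
type controllerConfig struct {
	RunParallelism int `mapstructure:"runParallelism"`
}

func main() {
	raw := map[string]interface{}{"runParallelism": 3}
	var cfg controllerConfig
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.RunParallelism) // 3
}
```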
47 changes: 40 additions & 7 deletions internal/controllers/terraformlayer/controller.go
@@ -109,10 +109,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		log.Errorf("failed to get TerraformRepository linked to layer %s: %s", layer.Name, err)
 		return ctrl.Result{RequeueAfter: r.Config.Controller.Timers.OnError}, err
 	}
-	err = r.cleanupRuns(ctx, layer, repository)
-	if err != nil {
-		log.Warningf("failed to cleanup runs for layer %s: %s", layer.Name, err)
-	}
 	state, conditions := r.GetState(ctx, layer)
 	lastResult, err := r.Datastore.GetPlan(layer.Namespace, layer.Name, layer.Status.LastRun.Name, "", "short")
 	if err != nil {
@@ -124,18 +120,55 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 	runHistory := layer.Status.LatestRuns
 	if run != nil {
 		lastRun = getRun(*run)
-		runHistory = updateLatestRuns(runHistory, *run)
+		runHistory = updateLatestRuns(runHistory, *run, *configv1alpha1.GetRunHistoryPolicy(repository, layer).KeepLastRuns)
 	}
 	layer.Status = configv1alpha1.TerraformLayerStatus{Conditions: conditions, State: getStateString(state), LastResult: string(lastResult), LastRun: lastRun, LatestRuns: runHistory}
 	err = r.Client.Status().Update(ctx, layer)
 	if err != nil {
 		r.Recorder.Event(layer, corev1.EventTypeWarning, "Reconciliation", "Could not update layer status")
 		log.Errorf("could not update layer %s status: %s", layer.Name, err)
 	}
+	err = r.cleanupRuns(ctx, layer, repository)
+	if err != nil {
+		log.Warningf("failed to cleanup runs for layer %s: %s", layer.Name, err)
+	}
 	log.Infof("finished reconciliation cycle for layer %s/%s", layer.Namespace, layer.Name)
 	return result, nil
 }
 
+func (r *Reconciler) cleanupRuns(ctx context.Context, layer *configv1alpha1.TerraformLayer, repository *configv1alpha1.TerraformRepository) error {
+	historyPolicy := configv1alpha1.GetRunHistoryPolicy(repository, layer)
+	if len(layer.Status.LatestRuns) < *historyPolicy.KeepLastRuns {
+		log.Infof("no runs to delete for layer %s", layer.Name)
+		return nil
+	}
+	runs, err := r.getAllRuns(ctx, layer)
+	if err != nil {
+		return err
+	}
+	runsToKeep := map[string]bool{}
+	for _, run := range layer.Status.LatestRuns {
+		runsToKeep[run.Name] = true
+	}
+	toDelete := []*configv1alpha1.TerraformRun{}
+	for _, run := range runs {
+		if _, ok := runsToKeep[run.Name]; !ok {
+			toDelete = append(toDelete, run)
+		}
+	}
+	if len(toDelete) == 0 {
+		log.Infof("no runs to delete for layer %s", layer.Name)
+		return nil
+	}
+	err = deleteAll(ctx, r.Client, toDelete)
+	if err != nil {
+		return err
+	}
+	log.Infof("deleted %d runs for layer %s", len(toDelete), layer.Name)
+	r.Recorder.Event(layer, corev1.EventTypeNormal, "Reconciliation", "Cleaned up old runs")
+	return nil
+}
+
 func getRun(run configv1alpha1.TerraformRun) configv1alpha1.TerraformLayerRun {
 	return configv1alpha1.TerraformLayerRun{
 		Name: run.Name,
@@ -145,7 +178,7 @@ func getRun(run configv1alpha1.TerraformRun) configv1alpha1.TerraformLayerRun {
 	}
 }
 
-func updateLatestRuns(runs []configv1alpha1.TerraformLayerRun, run configv1alpha1.TerraformRun) []configv1alpha1.TerraformLayerRun {
+func updateLatestRuns(runs []configv1alpha1.TerraformLayerRun, run configv1alpha1.TerraformRun, keep int) []configv1alpha1.TerraformLayerRun {
 	oldestRun := &configv1alpha1.TerraformLayerRun{
 		Date: metav1.NewTime(time.Now()),
 	}
@@ -157,7 +190,7 @@ func updateLatestRuns(runs []configv1alpha1.TerraformLayerRun, run configv1alpha
 			oldestRunIndex = i
 		}
 	}
-	if oldestRun == nil || len(runs) < 5 {
+	if oldestRun == nil || len(runs) < keep {
 		return append(runs, newRun)
 	}
 	rs := append(runs[:oldestRunIndex], runs[oldestRunIndex+1:]...)
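The body of `updateLatestRuns` is mostly collapsed above; the net effect of the new `keep` parameter is that `layer.Status.LatestRuns` now behaves as a bounded history sized by the resolved `KeepLastRuns` value instead of the previous hard-coded 5. A simplified, standalone sketch of that eviction behavior, using a stand-in `run` type rather than the real `TerraformLayerRun`:

```go
package main

import (
	"fmt"
	"time"
)

// run is a simplified stand-in for configv1alpha1.TerraformLayerRun.
type run struct {
	Name string
	Date time.Time
}

// appendBounded mirrors the behavior updateLatestRuns now has: append the new
// run, and once the history already holds `keep` entries, drop the oldest one first.
func appendBounded(history []run, newRun run, keep int) []run {
	if len(history) < keep {
		return append(history, newRun)
	}
	oldest := 0
	for i, r := range history {
		if r.Date.Before(history[oldest].Date) {
			oldest = i
		}
	}
	history = append(history[:oldest], history[oldest+1:]...)
	return append(history, newRun)
}

func main() {
	h := []run{}
	for i := 0; i < 5; i++ {
		h = appendBounded(h, run{Name: fmt.Sprintf("run-%d", i), Date: time.Now().Add(time.Duration(i) * time.Minute)}, 3)
	}
	for _, r := range h {
		fmt.Println(r.Name) // run-2, run-3, run-4: only the 3 newest remain
	}
}
```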
56 changes: 2 additions & 54 deletions internal/controllers/terraformlayer/run.go
@@ -3,14 +3,12 @@ package terraformlayer
 import (
 	"context"
 	"fmt"
-	"sort"
 	"strings"
 	"sync"
 
 	configv1alpha1 "github.com/padok-team/burrito/api/v1alpha1"
 	"github.com/padok-team/burrito/internal/annotations"
 	log "github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
@@ -62,7 +60,7 @@ func (r *Reconciler) getRun(layer *configv1alpha1.TerraformLayer, repository *co
 	}
 }
 
-func (r *Reconciler) getAllFinishedRuns(ctx context.Context, layer *configv1alpha1.TerraformLayer, repository *configv1alpha1.TerraformRepository) ([]*configv1alpha1.TerraformRun, error) {
+func (r *Reconciler) getAllRuns(ctx context.Context, layer *configv1alpha1.TerraformLayer) ([]*configv1alpha1.TerraformRun, error) {
 	list := &configv1alpha1.TerraformRunList{}
 	labelSelector := labels.NewSelector()
 	for key, value := range GetDefaultLabels(layer) {
@@ -85,9 +83,7 @@ func (r *Reconciler) getAllFinishedRuns(ctx context.Context, layer *configv1alph
 	// Keep only runs with state Succeeded or Failed
 	var runs []*configv1alpha1.TerraformRun
 	for _, run := range list.Items {
-		if run.Status.State == "Succeeded" || run.Status.State == "Failed" {
-			runs = append(runs, &run)
-		}
+		runs = append(runs, &run)
 	}
 	return runs, nil
 }
@@ -125,51 +121,3 @@ func deleteAll(ctx context.Context, c client.Client, objs []*configv1alpha1.Terr
 
 	return ret
 }
-
-func (r *Reconciler) cleanupRuns(ctx context.Context, layer *configv1alpha1.TerraformLayer, repository *configv1alpha1.TerraformRepository) error {
-	historyPolicy := configv1alpha1.GetRunHistoryPolicy(repository, layer)
-
-	runs, err := r.getAllFinishedRuns(ctx, layer, repository)
-	if err != nil {
-		return err
-	}
-	sortedRuns := sortAndSplitRunsByAction(runs)
-	toDelete := []*configv1alpha1.TerraformRun{}
-	if len(sortedRuns[string(PlanAction)]) <= *historyPolicy.KeepLastPlanRuns {
-		log.Infof("no plan runs to delete for layer %s", layer.Name)
-	} else {
-		toDelete = append(toDelete, sortedRuns[string(PlanAction)][:len(sortedRuns[string(PlanAction)])-*historyPolicy.KeepLastPlanRuns]...)
-	}
-	if len(sortedRuns[string(ApplyAction)]) <= *historyPolicy.KeepLastApplyRuns {
-		log.Infof("no apply runs to delete for layer %s", layer.Name)
-	} else {
-		toDelete = append(toDelete, sortedRuns[string(ApplyAction)][:len(sortedRuns[string(ApplyAction)])-*historyPolicy.KeepLastApplyRuns]...)
-	}
-	if len(toDelete) == 0 {
-		log.Infof("no runs to delete for layer %s", layer.Name)
-		return nil
-	}
-	err = deleteAll(ctx, r.Client, toDelete)
-	if err != nil {
-		return err
-	}
-	log.Infof("deleted %d runs for layer %s", len(toDelete), layer.Name)
-	r.Recorder.Event(layer, corev1.EventTypeNormal, "Reconciliation", "Cleaned up old runs")
-	return nil
-}
-
-func sortAndSplitRunsByAction(runs []*configv1alpha1.TerraformRun) map[string][]*configv1alpha1.TerraformRun {
-	splittedRuns := map[string][]*configv1alpha1.TerraformRun{}
-	for _, run := range runs {
-		if _, ok := splittedRuns[run.Spec.Action]; !ok {
-			splittedRuns[run.Spec.Action] = []*configv1alpha1.TerraformRun{}
-		}
-		splittedRuns[run.Spec.Action] = append(splittedRuns[run.Spec.Action], run)
-	}
-	for action := range splittedRuns {
-		sort.Slice(splittedRuns[action], func(i, j int) bool {
-			return splittedRuns[action][i].CreationTimestamp.Before(&splittedRuns[action][j].CreationTimestamp)
-		})
-	}
-	return splittedRuns
-}
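With the state filter gone, `getAllRuns` returns every TerraformRun carrying the layer's default labels, and `cleanupRuns` keeps only those still referenced in `layer.Status.LatestRuns`. The actual List call is collapsed above; the sketch below shows the typical controller-runtime pattern it presumably follows, with a placeholder label key rather than burrito's real default labels:

```go
// Sketch only: listRunsForLayer shows the usual controller-runtime way of
// listing objects by label selector, which getAllRuns presumably follows.
package sketch

import (
	"context"

	configv1alpha1 "github.com/padok-team/burrito/api/v1alpha1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func listRunsForLayer(ctx context.Context, c client.Client, layer *configv1alpha1.TerraformLayer) ([]*configv1alpha1.TerraformRun, error) {
	list := &configv1alpha1.TerraformRunList{}
	selector := labels.SelectorFromSet(labels.Set{
		"burrito/layer": layer.Name, // placeholder label key, not the real one
	})
	err := c.List(ctx, list,
		client.InNamespace(layer.Namespace),
		client.MatchingLabelsSelector{Selector: selector},
	)
	if err != nil {
		return nil, err
	}
	runs := make([]*configv1alpha1.TerraformRun, 0, len(list.Items))
	for i := range list.Items {
		// Index the slice instead of taking the address of the loop variable.
		runs = append(runs, &list.Items[i])
	}
	return runs, nil
}
```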