-
Notifications
You must be signed in to change notification settings - Fork 60
/
eraser_test.go
195 lines (165 loc) · 5.91 KB
/
eraser_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"time"
"github.com/Azure/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/k8s/resources"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// Label selectors and timing constants shared by the tests in this file.
const (
	// collectorLabel selects the collector pods that eraser deploys.
	collectorLabel = "name=collector"
	// eraserLabel selects the eraser ImageJob worker pods.
	eraserLabel = "name=eraser"
	// restartTimeout bounds how long we watch for an accidental
	// redeployment of the eraser job after its cleanup.
	restartTimeout = time.Minute
)
func TestImageListTriggersEraserImageJob(t *testing.T) {
rmImageFeat := features.New("An ImageList should trigger an eraser ImageJob").
Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
podSelectorLabels := map[string]string{"app": util.Nginx}
nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, podSelectorLabels, corev1.Container{Image: util.Nginx, Name: util.Nginx})
if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
t.Error("Failed to create the dep", err)
}
client := cfg.Client()
// wait for all collector pods to be present before removing them
err := wait.For(
util.NumPodsPresentForLabel(ctx, client, 3, collectorLabel),
wait.WithTimeout(time.Minute*2),
wait.WithInterval(time.Millisecond*500),
)
if err != nil {
t.Fatal(err)
}
if err := util.DeleteImageListsAndJobs(cfg.KubeconfigFile()); err != nil {
t.Error("Failed to clean eraser obejcts ", err)
}
// wait for collector deployment to be removed, to prevent conflicts or races
err = wait.For(
util.NumPodsPresentForLabel(ctx, client, 0, collectorLabel),
wait.WithTimeout(time.Minute*2),
wait.WithInterval(time.Millisecond*500),
)
if err != nil {
t.Fatal(err)
}
return ctx
}).
Assess("deployment successfully deployed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
client, err := cfg.NewClient()
if err != nil {
t.Error("Failed to create new client", err)
}
resultDeployment := appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
}
if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue),
wait.WithTimeout(util.Timeout)); err != nil {
t.Error("deployment not found", err)
}
return context.WithValue(ctx, util.Nginx, &resultDeployment)
}).
Assess("Images successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
// delete deployment
client, err := cfg.NewClient()
if err != nil {
t.Error("Failed to create new client", err)
}
var pods corev1.PodList
err = client.Resources().List(ctx, &pods, func(o *metav1.ListOptions) {
o.LabelSelector = labels.SelectorFromSet(labels.Set{"app": util.Nginx}).String()
})
if err != nil {
t.Fatal(err)
}
dep := ctx.Value(util.Nginx).(*appsv1.Deployment)
if err := client.Resources().Delete(ctx, dep); err != nil {
t.Error("Failed to delete the dep", err)
}
for _, nodeName := range util.GetClusterNodes(t) {
err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Nginx), wait.WithTimeout(util.Timeout))
if err != nil {
// Let's not mark this as an error
// We only have this to prevent race conditions with the eraser spinning up
t.Logf("error while waiting for deployment deletion: %v", err)
}
}
// deploy imageJob config
if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), "../../test-data", "eraser_v1alpha1_imagelist.yaml"); err != nil {
t.Error("Failed to deploy image list config", err)
}
podNames := []string{}
// get eraser pod name
err = wait.For(func() (bool, error) {
l := corev1.PodList{}
err = client.Resources().List(ctx, &l, resources.WithLabelSelector("name=eraser"))
if err != nil {
return false, err
}
if len(l.Items) != 3 {
return false, nil
}
for _, pod := range l.Items {
podNames = append(podNames, pod.ObjectMeta.Name)
}
return true, nil
}, wait.WithTimeout(time.Minute*2), wait.WithInterval(time.Millisecond*500))
if err != nil {
t.Fatal(err)
}
// wait for those specific pods to no longer exist, so that when we
// check later for an accidental redeployment, we are sure it is
// actually a new deployment.
err = wait.For(func() (bool, error) {
var l corev1.PodList
err = client.Resources().List(ctx, &l, resources.WithLabelSelector("name=eraser"))
if err != nil {
return false, err
}
if len(l.Items) == 0 {
return true, nil
}
for _, name := range podNames {
for _, pod := range l.Items {
if name == pod.ObjectMeta.Name {
return false, nil
}
}
}
return true, nil
}, wait.WithTimeout(time.Minute*2), wait.WithInterval(time.Millisecond*500))
if err != nil {
t.Fatal(err)
}
t.Logf("initial eraser deployment cleaned up")
ctxT, cancel := context.WithTimeout(ctx, time.Minute*3)
defer cancel()
util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Nginx)
return ctx
}).
Assess("Eraser job was not restarted", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
// until a timeout is reached, make sure there are no pods matching
// the selector name=eraser
client := cfg.Client()
ctxT2, cancel := context.WithTimeout(ctx, restartTimeout)
defer cancel()
util.CheckDeploymentCleanedUp(ctxT2, t, client)
return ctx
}).
Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
if err := util.GetManagerLogs(ctx, cfg, t); err != nil {
t.Error("error getting manager logs", err)
}
return ctx
}).
Feature()
util.Testenv.Test(t, rmImageFeat)
}