Skip to content

Commit

Permalink
e2e: confirm the cleanup of PVs with legacy affinity attributes
Browse files Browse the repository at this point in the history
This applies a small refactor to the e2e tests to ensure that the newer
provisioner is capable of siting helper pods correctly to clean up PVs
with "legacy" affinity constraints.

The kind cluster itself is reconfigured to ensure that all nodes have
`metadata.name` != `metadata.labels["kubernetes.io/hostname"]`, which is
an assumption that does not hold for many cloud providers.
  • Loading branch information
jan-g authored and derekbit committed Aug 31, 2024
1 parent ea957ee commit 59a15da
Show file tree
Hide file tree
Showing 6 changed files with 86 additions and 21 deletions.
51 changes: 32 additions & 19 deletions test/pod_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,38 +82,38 @@ func TestPVCTestSuite(t *testing.T) {
// TestPodWithHostPathVolume deploys the "pod" kustomization and waits for the
// pod to become Ready, then checks the PV is hostPath-backed.
func (p *PodTestSuite) TestPodWithHostPathVolume() {
	p.kustomizeDir = "pod"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
}

// TestPodWithLocalVolume deploys the "pod-with-local-volume" kustomization and
// waits for the pod to become Ready, then checks the PV is local-backed.
func (p *PodTestSuite) TestPodWithLocalVolume() {
	p.kustomizeDir = "pod-with-local-volume"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
}

// TestPodWithLocalVolumeDefault deploys the "pod-with-default-local-volume"
// kustomization and waits for the pod to become Ready, then checks the PV is
// local-backed.
func (p *PodTestSuite) TestPodWithLocalVolumeDefault() {
	p.kustomizeDir = "pod-with-default-local-volume"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
}

// TestPodWithNodeAffinity deploys the "pod-with-node-affinity" kustomization
// (a pod pinned by kubernetes.io/hostname) and waits for the pod to become
// Ready, then checks the PV is hostPath-backed.
func (p *PodTestSuite) TestPodWithNodeAffinity() {
	p.kustomizeDir = "pod-with-node-affinity"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
}

// TestPodWithRWOPVolume deploys the "pod-with-rwop-volume" kustomization
// (ReadWriteOncePod access mode) and waits for the pod to become Ready, then
// checks the PV is local-backed.
func (p *PodTestSuite) TestPodWithRWOPVolume() {
	p.kustomizeDir = "pod-with-rwop-volume"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
}

func (p *PodTestSuite) TestPodWithSecurityContext() {
p.kustomizeDir = "pod-with-security-context"
kustomizeDir := testdataFile(p.kustomizeDir)

runTest(p, []string{p.config.IMAGE}, "podscheduled", hostPathVolumeType)
runTest(p, []string{p.config.IMAGE}, waitCondition("podscheduled"), hostPathVolumeType)

cmd := fmt.Sprintf(`kubectl get pod -l %s=%s -o=jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].reason}'`, LabelKey, LabelValue)

Expand Down Expand Up @@ -142,22 +142,33 @@ loop:
// TestPodWithSubpath deploys the "pod-with-subpath" kustomization and waits
// for the pod to become Ready, then checks the PV is hostPath-backed.
func (p *PodTestSuite) TestPodWithSubpath() {
	p.kustomizeDir = "pod-with-subpath"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
}

// xxTestPodWithMultipleStorageClasses deploys the "multiple-storage-classes"
// kustomization. NOTE(review): the "xx" prefix keeps this out of the test
// runner (testify only discovers methods starting with "Test") — presumably
// disabled deliberately; confirm before renaming.
func (p *PodTestSuite) xxTestPodWithMultipleStorageClasses() {
	p.kustomizeDir = "multiple-storage-classes"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
}

// TestPodWithCustomPathPatternStorageClasses deploys the "custom-path-pattern"
// kustomization and waits for the pod to become Ready, then checks the PV is
// hostPath-backed.
func (p *PodTestSuite) TestPodWithCustomPathPatternStorageClasses() {
	p.kustomizeDir = "custom-path-pattern"

	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
}

func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string) {
func (p *PodTestSuite) TestPodWithLegacyAffinityConstraint() {
// The helper pod should be correctly scheduled
p.kustomizeDir = "pv-with-legacy-affinity"

runTest(p, []string{p.config.IMAGE}, "kubectl wait pv pvc-to-clean-up --for delete --timeout=120s", "")
}

// waitCondition builds the kubectl command that blocks until all pods carrying
// the suite's test label reach the given condition (e.g. "ready",
// "podscheduled"), with a 120s timeout.
func waitCondition(cond string) string {
	// Parameter renamed from "waitCondition", which shadowed the function name.
	return fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, cond)
}

func runTest(p *PodTestSuite, images []string, waitCmd, volumeType string) {
kustomizeDir := testdataFile(p.kustomizeDir)

var cmds []string
Expand All @@ -171,7 +182,7 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string)
cmds,
fmt.Sprintf("kustomize edit add label %s:%s -f", LabelKey, LabelValue),
"kustomize build | kubectl apply -f -",
fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, waitCondition),
waitCmd,
)

for _, cmd := range cmds {
Expand All @@ -188,13 +199,15 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string)
}
}

typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
typeCheckOutput, err := c.CombinedOutput()
if err != nil {
p.FailNow("", "failed to check volume type: %v", err)
}
if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
p.FailNow("volume Type not correct")
if volumeType != "" {
typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
typeCheckOutput, err := c.CombinedOutput()
if err != nil {
p.FailNow("", "failed to check volume type: %v", err)
}
if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
p.FailNow("volume Type not correct")
}
}
}
4 changes: 4 additions & 0 deletions test/testdata/kind-cluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,8 @@ kind: Cluster
nodes:
- role: control-plane
- role: worker
labels:
kubernetes.io/hostname: kind-worker1.hostname
- role: worker
labels:
kubernetes.io/hostname: kind-worker2.hostname
2 changes: 1 addition & 1 deletion test/testdata/pod-with-node-affinity/patch.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,4 @@ spec:
- key: kubernetes.io/hostname
operator: In
values:
- kind-worker
- kind-worker1.hostname
10 changes: 10 additions & 0 deletions test/testdata/pv-with-legacy-affinity/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Kustomization for the legacy-affinity cleanup e2e fixture: deploys the
# provisioner together with an orphaned PV (pv.yaml) for it to clean up.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# The provisioner manifests themselves, plus the pre-created PV fixture.
- ../../../deploy
- pv.yaml
commonLabels:
  app: local-path-provisioner
images:
# Pin the provisioner image to the locally built "dev" tag under test.
- name: rancher/local-path-provisioner
  newTag: dev
38 changes: 38 additions & 0 deletions test/testdata/pv-with-legacy-affinity/pv.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# A pre-created, orphaned PV that the provisioner is expected to clean up.
# Its claimRef points at a PVC that does not exist, and its reclaim policy is
# Delete, so the provisioner should schedule a helper pod and remove it.
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    # NOTE(review): the selected-node annotation holds the node *name*
    # ("kind-worker"), while nodeAffinity below uses the node's hostname
    # label ("kind-worker1.hostname") — the "legacy" mismatch under test.
    local.path.provisioner/selected-node: kind-worker
    pv.kubernetes.io/provisioned-by: rancher.io/local-path
  finalizers:
  - kubernetes.io/pv-protection
  labels:
    # Excludes this PV from kustomize-driven teardown (the delete in
    # deleteKustomizeDeployment filters on test/avoid-cleanup!=true), so only
    # the provisioner itself may remove it.
    test/avoid-cleanup: "true"
  name: pvc-to-clean-up
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 100Mi
  hostPath:
    path: /opt/local-path-provisioner/default/local-path-pvc
    type: DirectoryOrCreate
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - kind-worker1.hostname
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: no-such-pvc
    namespace: default
    # The PVC "definitely doesn't exist any more"
    resourceVersion: "1"
    uid: 12345678-1234-5678-9abc-123456789abc
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-path-custom-path-pattern
  volumeMode: Filesystem
2 changes: 1 addition & 1 deletion test/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ func testdataFile(fields ...string) string {
func deleteKustomizeDeployment(t *testing.T, kustomizeDir string, envs []string) error {
_, err := runCmd(
t,
"kustomize build | kubectl delete --timeout=180s -f -",
"kustomize build | kubectl delete --timeout=180s -f - -l 'test/avoid-cleanup!=true'",
testdataFile(kustomizeDir),
envs,
nil,
Expand Down

0 comments on commit 59a15da

Please sign in to comment.