diff --git a/pkg/controller/clone/planner.go b/pkg/controller/clone/planner.go
index 78f146645c..396b3c8c90 100644
--- a/pkg/controller/clone/planner.go
+++ b/pkg/controller/clone/planner.go
@@ -11,6 +11,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -58,6 +59,13 @@ var coreTypesCreated = []client.Object{
 	&corev1.Pod{},
 }
 
+// all types that may have been created
+var listTypesToDelete = []client.ObjectList{
+	&corev1.PersistentVolumeClaimList{},
+	&corev1.PodList{},
+	&snapshotv1.VolumeSnapshotList{},
+}
+
 // AddCoreWatches watches "core" types
 func (p *Planner) AddCoreWatches(log logr.Logger) error {
 	p.watchMutex.Lock()
@@ -129,6 +137,26 @@ func (p *Planner) Plan(ctx context.Context, args *PlanArgs) ([]Phase, error) {
 	return nil, fmt.Errorf("unknown strategy/source %s", string(args.Strategy))
 }
 
+func (p *Planner) Cleanup(ctx context.Context, log logr.Logger, owner client.Object) error {
+	log.V(3).Info("Cleaning up for obj", "obj", owner)
+
+	for _, lt := range listTypesToDelete {
+		ls, err := labels.Parse(fmt.Sprintf("%s=%s", p.OwnershipLabel, string(owner.GetUID())))
+		if err != nil {
+			return err
+		}
+
+		lo := &client.ListOptions{
+			LabelSelector: ls,
+		}
+		if err := cc.BulkDeleteResources(ctx, p.Client, lt, lo); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func (p *Planner) watchSnapshots(ctx context.Context, log logr.Logger) error {
 	p.watchMutex.Lock()
 	defer p.watchMutex.Unlock()
diff --git a/pkg/controller/clone/planner_test.go b/pkg/controller/clone/planner_test.go
index 49204f83e9..52299c377d 100644
--- a/pkg/controller/clone/planner_test.go
+++ b/pkg/controller/clone/planner_test.go
@@ -25,6 +25,7 @@ import (
 	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -47,6 +48,7 @@ var _ = Describe("Planner test", func() {
 		namespace  = "ns"
 		sourceName = "source"
 		targetName = "target"
+		ownerLabel = "label"
 	)
 
 	var (
@@ -75,7 +77,7 @@ var _ = Describe("Planner test", func() {
 
 		return &Planner{
 			RootObjectType: &corev1.PersistentVolumeClaimList{},
-			OwnershipLabel: "label",
+			OwnershipLabel: ownerLabel,
 			UIDField:       "uid",
 			Image:          "image",
 			PullPolicy:     corev1.PullIfNotPresent,
@@ -369,7 +371,7 @@ var _ = Describe("Planner test", func() {
 		})
 	})
 
-	Context("Paln tests", func() {
+	Context("Plan tests", func() {
 		cdiConfig := &cdiv1.CDIConfig{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "config",
@@ -510,4 +512,53 @@ var _ = Describe("Planner test", func() {
 			validateRebindPhase(planner, args, plan[2])
 		})
 	})
+
+	Context("Cleanup tests", func() {
+		tempResources := func() []runtime.Object {
+			target := createTargetClaim()
+			return []runtime.Object{
+				&corev1.PersistentVolumeClaim{
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace: namespace,
+						Name:      "tmpClaim",
+						Labels: map[string]string{
+							ownerLabel: string(target.UID),
+						},
+					},
+				},
+				&corev1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace: namespace,
+						Name:      "tmpPod",
+						Labels: map[string]string{
+							ownerLabel: string(target.UID),
+						},
+					},
+				},
+				&snapshotv1.VolumeSnapshot{
+					ObjectMeta: metav1.ObjectMeta{
+						Namespace: namespace,
+						Name:      "tmpSnapshot",
+						Labels: map[string]string{
+							ownerLabel: string(target.UID),
+						},
+					},
+				},
+			}
+		}
+
+		It("should cleanup tmp resources", func() {
+			tempObjs := tempResources()
+			target := createTargetClaim()
+			planner := createPlanner(tempObjs...)
+			err := planner.Cleanup(context.Background(), log, target)
+			Expect(err).ToNot(HaveOccurred())
+			for _, r := range tempResources() {
+				co := r.(client.Object)
+				err = planner.Client.Get(context.Background(), client.ObjectKeyFromObject(co), co)
+				Expect(err).To(HaveOccurred())
+				Expect(k8serrors.IsNotFound(err)).To(BeTrue())
+			}
+		})
+	})
 })
diff --git a/pkg/controller/populators/clone-populator.go b/pkg/controller/populators/clone-populator.go
index fb096267a4..3cc847fae6 100644
--- a/pkg/controller/populators/clone-populator.go
+++ b/pkg/controller/populators/clone-populator.go
@@ -22,11 +22,9 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
-	snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
 	corev1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/labels"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -63,6 +61,7 @@ const (
 type Planner interface {
 	ChooseStrategy(context.Context, *clone.ChooseStrategyArgs) (*cdiv1.CDICloneStrategy, error)
 	Plan(context.Context, *clone.PlanArgs) ([]clone.Phase, error)
+	Cleanup(context.Context, logr.Logger, client.Object) error
 }
 
 // ClonePopulatorReconciler reconciles PVCs with VolumeCloneSources
@@ -260,7 +259,7 @@ func (r *ClonePopulatorReconciler) reconcilePending(ctx context.Context, log log
 
 func (r *ClonePopulatorReconciler) reconcileDone(ctx context.Context, log logr.Logger, pvc *corev1.PersistentVolumeClaim) (reconcile.Result, error) {
 	log.V(3).Info("executing cleanup")
-	if err := r.cleanup(ctx, log, pvc); err != nil {
+	if err := r.planner.Cleanup(ctx, log, pvc); err != nil {
 		return reconcile.Result{}, err
 	}
 
@@ -269,31 +268,6 @@ func (r *ClonePopulatorReconciler) reconcileDone(ctx context.Context, log logr.L
 	return reconcile.Result{}, r.client.Update(ctx, pvc)
 }
 
-func (r *ClonePopulatorReconciler) cleanup(ctx context.Context, log logr.Logger, owner client.Object) error {
-	log.V(3).Info("Cleaning up for obj", "obj", owner)
-	listTypes := []client.ObjectList{
-		&corev1.PersistentVolumeClaimList{},
-		&snapshotv1.VolumeSnapshotList{},
-		&corev1.PodList{},
-	}
-
-	for _, lt := range listTypes {
-		ls, err := labels.Parse(fmt.Sprintf("%s=%s", LabelOwnedByUID, string(owner.GetUID())))
-		if err != nil {
-			return err
-		}
-
-		lo := &client.ListOptions{
-			LabelSelector: ls,
-		}
-		if err := cc.BulkDeleteResources(ctx, r.client, lt, lo); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 func (r *ClonePopulatorReconciler) initTargetClaim(ctx context.Context, pvc *corev1.PersistentVolumeClaim, vcs *cdiv1.VolumeCloneSource, cs cdiv1.CDICloneStrategy) (bool, error) {
 	claimCpy := pvc.DeepCopy()
 	clone.AddCommonClaimLabels(claimCpy)
diff --git a/pkg/controller/populators/clone-populator_test.go b/pkg/controller/populators/clone-populator_test.go
index a72dcb9cfb..25b39556ae 100644
--- a/pkg/controller/populators/clone-populator_test.go
+++ b/pkg/controller/populators/clone-populator_test.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/go-logr/logr"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
@@ -27,7 +28,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -131,38 +131,6 @@ var _ = Describe("Clone populator tests", func() {
 		return target
 	}
 
-	tempResources := func() []runtime.Object {
-		return []runtime.Object{
-			&corev1.PersistentVolumeClaim{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: namespace,
-					Name:      "tmpClaim",
-					Labels: map[string]string{
-						LabelOwnedByUID: "uid",
-					},
-				},
-			},
-			&corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: namespace,
-					Name:      "tmpPod",
-					Labels: map[string]string{
-						LabelOwnedByUID: "uid",
-					},
-				},
-			},
-			&snapshotv1.VolumeSnapshot{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: namespace,
-					Name:      "tmpSnapshot",
-					Labels: map[string]string{
-						LabelOwnedByUID: "uid",
-					},
-				},
-			},
-		}
-	}
-
 	getTarget := func(c client.Client) *corev1.PersistentVolumeClaim {
 		target, _ := targetAndDataSource()
 		result := &corev1.PersistentVolumeClaim{}
@@ -354,38 +322,16 @@ var _ = Describe("Clone populator tests", func() {
 		Expect(pvc.Annotations[AnnClonePhase]).To(Equal("Succeeded"))
 	})
 
-	It("should remove finalizer and cleanup when succeeded", func() {
-		tempObjs := tempResources()
+	It("should remove finalizer and call cleanup when succeeded", func() {
 		target := succeededTarget()
-		allResources := append(tempObjs, target)
-		reconciler := createClonePopulatorReconciler(allResources...)
+		reconciler := createClonePopulatorReconciler(target)
+		fp := &fakePlanner{}
+		reconciler.planner = fp
 		result, err := reconciler.Reconcile(context.Background(), nn)
 		isDefaultResult(result, err)
 		pvc := getTarget(reconciler.client)
 		Expect(pvc.Finalizers).ToNot(ContainElement(cloneFinalizer))
-		for _, r := range tempResources() {
-			co := r.(client.Object)
-			err = reconciler.client.Get(context.Background(), client.ObjectKeyFromObject(co), co)
-			Expect(err).To(HaveOccurred())
-			Expect(k8serrors.IsNotFound(err)).To(BeTrue())
-		}
-	})
-
-	It("should remove finalizer and cleanup when deleted while in progress", func() {
-		tempObjs := tempResources()
-		target, _ := initinializedTargetAndDataSource()
-		ts := metav1.Now()
-		target.DeletionTimestamp = &ts
-		allResources := append(tempObjs, target)
-		reconciler := createClonePopulatorReconciler(allResources...)
-		result, err := reconciler.Reconcile(context.Background(), nn)
-		isDefaultResult(result, err)
-		for _, r := range tempResources() {
-			co := r.(client.Object)
-			err = reconciler.client.Get(context.Background(), client.ObjectKeyFromObject(co), co)
-			Expect(err).To(HaveOccurred())
-			Expect(k8serrors.IsNotFound(err)).To(BeTrue())
-		}
+		Expect(fp.cleanupCalled).To(BeTrue())
 	})
 })
 
@@ -395,6 +341,7 @@ type fakePlanner struct {
 	chooseStrategyError error
 	planResult          []clone.Phase
 	planError           error
+	cleanupCalled       bool
 }
 
 func (p *fakePlanner) ChooseStrategy(ctx context.Context, args *clone.ChooseStrategyArgs) (*cdiv1.CDICloneStrategy, error) {
@@ -405,6 +352,11 @@ func (p *fakePlanner) Plan(ctx context.Context, args *clone.PlanArgs) ([]clone.P
 	return p.planResult, p.planError
 }
 
+func (p *fakePlanner) Cleanup(ctx context.Context, log logr.Logger, owner client.Object) error {
+	p.cleanupCalled = true
+	return nil
+}
+
 type fakePhase struct {
 	name   string
 	result *reconcile.Result
diff --git a/tests/clone-populator_test.go b/tests/clone-populator_test.go
index acb5ed33b3..8a935d778f 100644
--- a/tests/clone-populator_test.go
+++ b/tests/clone-populator_test.go
@@ -139,7 +139,7 @@ var _ = Describe("Clone Populator tests", func() {
 		}
 	})
 
-	It("should should do filesystem to filesystem clone", func() {
+	It("should do filesystem to filesystem clone", func() {
 		source := createSource(defaultSize, corev1.PersistentVolumeFilesystem)
 		createDataSource()
 		target := createTarget(defaultSize, corev1.PersistentVolumeFilesystem)
@@ -149,7 +149,7 @@ var _ = Describe("Clone Populator tests", func() {
 		Expect(targetHash).To(Equal(sourceHash))
 	})
 
-	It("should should do filesystem to filesystem clone, source created after target", func() {
+	It("should do filesystem to filesystem clone, source created after target", func() {
 		createDataSource()
 		target := createTarget(defaultSize, corev1.PersistentVolumeFilesystem)
 		source := createSource(defaultSize, corev1.PersistentVolumeFilesystem)
@@ -159,7 +159,7 @@ var _ = Describe("Clone Populator tests", func() {
 		Expect(targetHash).To(Equal(sourceHash))
 	})
 
-	It("should should do filesystem to filesystem clone, dataSource created after target", func() {
+	It("should do filesystem to filesystem clone, dataSource created after target", func() {
 		source := createSource(defaultSize, corev1.PersistentVolumeFilesystem)
 		target := createTarget(defaultSize, corev1.PersistentVolumeFilesystem)
 		createDataSource()
@@ -169,7 +169,7 @@ var _ = Describe("Clone Populator tests", func() {
 		Expect(targetHash).To(Equal(sourceHash))
 	})
 
-	It("should should do filesystem to filesystem clone (bigger target)", func() {
+	It("should do filesystem to filesystem clone (bigger target)", func() {
 		source := createSource(defaultSize, corev1.PersistentVolumeFilesystem)
 		createDataSource()
 		target := createTarget(biggerSize, corev1.PersistentVolumeFilesystem)
@@ -181,7 +181,7 @@ var _ = Describe("Clone Populator tests", func() {
 		Expect(targetHash).To(Equal(sourceHash))
 	})
 
-	It("should should do block to filesystem clone", func() {
+	It("should do block to filesystem clone", func() {
 		if !f.IsBlockVolumeStorageClassAvailable() {
 			Skip("Storage Class for block volume is not available")
 		}
@@ -196,7 +196,7 @@ var _ = Describe("Clone Populator tests", func() {
 		Expect(targetHash).To(Equal(sourceHash))
 	})
 
-	It("should should do filesystem to block clone", func() {
+	It("should do filesystem to block clone", func() {
 		if !f.IsBlockVolumeStorageClassAvailable() {
 			Skip("Storage Class for block volume is not available")
 		}
@@ -211,7 +211,7 @@ var _ = Describe("Clone Populator tests", func() {
 		Expect(targetHash).To(Equal(sourceHash))
 	})
 
-	It("should should do csi clone if possible", func() {
+	It("should do csi clone if possible", func() {
 		if !f.IsCSIVolumeCloneStorageClassAvailable() {
 			Skip("CSI Clone does not work without a capable storage class")
 		}
@@ -224,4 +224,18 @@ var _ = Describe("Clone Populator tests", func() {
 		targetHash := getHash(target, 0)
 		Expect(targetHash).To(Equal(sourceHash))
 	})
+
+	It("should do snapshot clone if possible", func() {
+		if !f.IsSnapshotStorageClassAvailable() {
+			Skip("Snapshot Clone does not work without a capable storage class")
+		}
+		source := createSource(defaultSize, corev1.PersistentVolumeFilesystem)
+		createDataSource()
+		target := createTargetWithStrategy(defaultSize, corev1.PersistentVolumeFilesystem, "snapshot")
+		sourceHash := getHash(source, 0)
+		target = waitSucceeded(target)
+		Expect(target.Annotations["cdi.kubevirt.io/cloneType"]).To(Equal("snapshot"))
+		targetHash := getHash(target, 0)
+		Expect(targetHash).To(Equal(sourceHash))
+	})
 })
diff --git a/tests/framework/pvc.go b/tests/framework/pvc.go
index 1cc4be1129..fbc5f39e15 100644
--- a/tests/framework/pvc.go
+++ b/tests/framework/pvc.go
@@ -498,6 +498,7 @@ func addVolumeMounts(pvc *k8sv1.PersistentVolumeClaim, volumeName string) []k8sv
 		{
 			Name:      volumeName,
 			MountPath: utils.DefaultPvcMountPath,
+			ReadOnly:  true,
 		},
 	}
 	return volumeMounts
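// Note: cc.BulkDeleteResources is reused unchanged by the new Planner.Cleanup and its
// implementation is not part of this diff. A minimal sketch of the list-then-delete
// pattern it is assumed to follow, keyed on the owner-UID label selector; the package,
// function name, and error handling below are illustrative, not the actual CDI code.
package cleanupsketch

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// bulkDeleteByLabel lists every object of the given list type that matches the selector
// and deletes each item, ignoring objects that are already gone so the call is idempotent.
func bulkDeleteByLabel(ctx context.Context, c client.Client, list client.ObjectList, selector labels.Selector) error {
	if err := c.List(ctx, list, &client.ListOptions{LabelSelector: selector}); err != nil {
		return err
	}

	// Flatten the typed list (PVCs, Pods, VolumeSnapshots, ...) into individual objects.
	items, err := meta.ExtractList(list)
	if err != nil {
		return err
	}

	for _, item := range items {
		obj, ok := item.(client.Object)
		if !ok {
			continue
		}
		if err := c.Delete(ctx, obj); client.IgnoreNotFound(err) != nil {
			return err
		}
	}

	return nil
}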