diff --git a/pkg/controller/clone/rebind_test.go b/pkg/controller/clone/rebind_test.go index fbbf12369e..d1577dfb5c 100644 --- a/pkg/controller/clone/rebind_test.go +++ b/pkg/controller/clone/rebind_test.go @@ -180,7 +180,7 @@ var _ = Describe("RebindPhase test", func() { p := createRebindPhase(createTarget(), source, volume) result, err := p.Reconcile(context.Background()) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("PV pv bound to unexpected claim")) + Expect(err.Error()).To(Equal("PV pv bound to unexpected claim foo")) Expect(result).To(BeNil()) }) }) diff --git a/pkg/controller/common/BUILD.bazel b/pkg/controller/common/BUILD.bazel index c0c3b2d94d..4e59a426b3 100644 --- a/pkg/controller/common/BUILD.bazel +++ b/pkg/controller/common/BUILD.bazel @@ -54,7 +54,9 @@ go_test( "//vendor/github.com/onsi/ginkgo/extensions/table:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/log:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/log/zap:go_default_library", ], diff --git a/pkg/controller/common/util.go b/pkg/controller/common/util.go index 6cbcf8ff7a..80f3e99de6 100644 --- a/pkg/controller/common/util.go +++ b/pkg/controller/common/util.go @@ -108,8 +108,8 @@ const ( // AnnMultiStageImportDone marks a multi-stage import as totally finished AnnMultiStageImportDone = AnnAPIGroup + "/storage.checkpoint.done" - // AnnImportProgressReporting stores the current progress of the import process as a percetange - AnnImportProgressReporting = AnnAPIGroup + "/storage.import.progress" + // AnnPopulatorProgress is a standard annotation that can be used for progress reporting + AnnPopulatorProgress = AnnAPIGroup + "/storage.populator.progress" // AnnPreallocationRequested provides a const to indicate whether preallocation should be performed on the PV AnnPreallocationRequested = AnnAPIGroup + "/storage.preallocation.requested" @@ -204,6 +204,8 @@ const ( // AnnPopulatorKind annotation is added to a PVC' to specify the population kind, so it's later // checked by the common populator watches. AnnPopulatorKind = AnnAPIGroup + "/storage.populator.kind" + //AnnUsePopulator annotation indicates if the datavolume population will use populators + AnnUsePopulator = AnnAPIGroup + "/storage.usePopulator" //AnnDefaultStorageClass is the annotation indicating that a storage class is the default one. 
AnnDefaultStorageClass = "storageclass.kubernetes.io/is-default-class" @@ -1498,7 +1500,7 @@ func UpdateImageIOAnnotations(annotations map[string]string, imageio *cdiv1.Data // IsPVBoundToPVC checks if a PV is bound to a specific PVC func IsPVBoundToPVC(pv *corev1.PersistentVolume, pvc *corev1.PersistentVolumeClaim) bool { claimRef := pv.Spec.ClaimRef - return claimRef.Name == pvc.Name && claimRef.Namespace == pvc.Namespace && claimRef.UID == pvc.UID + return claimRef != nil && claimRef.Name == pvc.Name && claimRef.Namespace == pvc.Namespace && claimRef.UID == pvc.UID } // Rebind binds the PV of source to target @@ -1514,18 +1516,21 @@ func Rebind(ctx context.Context, c client.Client, source, target *corev1.Persist } // Examine the claimref for the PV and see if it's still bound to PVC' + if pv.Spec.ClaimRef == nil { + return fmt.Errorf("PV %s claimRef is nil", pv.Name) + } + if !IsPVBoundToPVC(pv, source) { // Something is not right if the PV is neither bound to PVC' nor target PVC if !IsPVBoundToPVC(pv, target) { klog.Errorf("PV bound to unexpected PVC: Could not rebind to target PVC '%s'", target.Name) - return fmt.Errorf("PV %s bound to unexpected claim", pv.Name) + return fmt.Errorf("PV %s bound to unexpected claim %s", pv.Name, pv.Spec.ClaimRef.Name) } // our work is done return nil } // Rebind PVC to target PVC - pv.Annotations = make(map[string]string) pv.Spec.ClaimRef = &corev1.ObjectReference{ Namespace: target.Namespace, Name: target.Name, @@ -1735,3 +1740,20 @@ func isCrdDeployed(c client.Client, name, version string, log logr.Logger) bool func IsSnapshotReady(snapshot *snapshotv1.VolumeSnapshot) bool { return snapshot.Status != nil && snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse } + +// GetResource updates given obj with the data of the object with the same name and namespace +func GetResource(ctx context.Context, c client.Client, namespace, name string, obj client.Object) (bool, error) { + obj.SetNamespace(namespace) + obj.SetName(name) + + err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj) + if err != nil { + if k8serrors.IsNotFound(err) { + return false, nil + } + + return false, err + } + + return true, nil +} diff --git a/pkg/controller/common/util_test.go b/pkg/controller/common/util_test.go index c31fd1562f..7f2b2320fc 100644 --- a/pkg/controller/common/util_test.go +++ b/pkg/controller/common/util_test.go @@ -7,7 +7,9 @@ import ( "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" ) @@ -77,6 +79,118 @@ var _ = Describe("GetDefaultStorageClass", func() { }) }) +var _ = Describe("Rebind", func() { + It("Should return error if PV doesn't exist", func() { + client := CreateClient() + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPVC", + Namespace: "namespace", + }, + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "testPV", + }, + } + err := Rebind(context.Background(), client, pvc, pvc) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + + It("Should return error if bound to unexpected claim", func() { + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPVC", + Namespace: "namespace", + }, + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "testPV", + }, + } + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + }, + Spec: v1.PersistentVolumeSpec{ + ClaimRef: &v1.ObjectReference{ + Name: "anotherPVC", + Namespace: "namespace", + UID: "uid", + }, + }, + } + client := CreateClient(pv) + err := Rebind(context.Background(), client, pvc, pvc) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("PV testPV bound to unexpected claim anotherPVC")) + }) + It("Should return nil if bound to target claim", func() { + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPVC", + Namespace: "namespace", + }, + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "testPV", + }, + } + targetPVC := pvc.DeepCopy() + targetPVC.Name = "targetPVC" + targetPVC.UID = "uid" + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + }, + Spec: v1.PersistentVolumeSpec{ + ClaimRef: &v1.ObjectReference{ + Name: "targetPVC", + Namespace: "namespace", + UID: "uid", + }, + }, + } + client := CreateClient(pv) + err := Rebind(context.Background(), client, pvc, targetPVC) + Expect(err).ToNot(HaveOccurred()) + }) + It("Should rebind pv to target claim", func() { + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPVC", + Namespace: "namespace", + }, + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "testPV", + }, + } + targetPVC := pvc.DeepCopy() + targetPVC.Name = "targetPVC" + pvc.UID = "uid" + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + }, + Spec: v1.PersistentVolumeSpec{ + ClaimRef: &v1.ObjectReference{ + Name: "testPVC", + Namespace: "namespace", + UID: "uid", + }, + }, + } + AddAnnotation(pv, "someAnno", "somevalue") + client := CreateClient(pv) + err := Rebind(context.Background(), client, pvc, targetPVC) + Expect(err).ToNot(HaveOccurred()) + updatedPV := &v1.PersistentVolume{} + key := types.NamespacedName{Name: pv.Name, Namespace: pv.Namespace} + err = client.Get(context.TODO(), key, updatedPV) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedPV.Spec.ClaimRef.Name).To(Equal(targetPVC.Name)) + //make sure annotations of pv from before rebind dont get deleted + Expect(pv.Annotations["someAnno"]).To(Equal("somevalue")) + }) +}) + func createPvcNoSize(name, ns string, annotations, labels map[string]string) *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/datavolume/BUILD.bazel b/pkg/controller/datavolume/BUILD.bazel index 
f4501bed34..4d62031395 100644 --- a/pkg/controller/datavolume/BUILD.bazel +++ b/pkg/controller/datavolume/BUILD.bazel @@ -20,6 +20,7 @@ go_library( deps = [ "//pkg/common:go_default_library", "//pkg/controller/common:go_default_library", + "//pkg/controller/populators:go_default_library", "//pkg/feature-gates:go_default_library", "//pkg/token:go_default_library", "//pkg/util:go_default_library", diff --git a/pkg/controller/datavolume/controller-base.go b/pkg/controller/datavolume/controller-base.go index e6ce8db37d..e0dde68306 100644 --- a/pkg/controller/datavolume/controller-base.go +++ b/pkg/controller/datavolume/controller-base.go @@ -103,16 +103,18 @@ type dvSyncState struct { pvc *corev1.PersistentVolumeClaim pvcSpec *corev1.PersistentVolumeClaimSpec dvSyncResult + usePopulator bool } // ReconcilerBase members type ReconcilerBase struct { - client client.Client - recorder record.EventRecorder - scheme *runtime.Scheme - log logr.Logger - featureGates featuregates.FeatureGates - installerLabels map[string]string + client client.Client + recorder record.EventRecorder + scheme *runtime.Scheme + log logr.Logger + featureGates featuregates.FeatureGates + installerLabels map[string]string + shouldUpdateProgress bool } func pvcIsPopulated(pvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume) bool { @@ -429,13 +431,14 @@ func (r *ReconcilerBase) syncDvPvcState(log logr.Logger, req reconcile.Request, return syncState, err } - if dv.DeletionTimestamp != nil { - log.Info("DataVolume marked for deletion, cleaning up") - if cleanup != nil { - if err := cleanup(&syncState); err != nil { - return syncState, err - } + if cleanup != nil { + if err := cleanup(&syncState); err != nil { + return syncState, err } + } + + if dv.DeletionTimestamp != nil { + log.Info("DataVolume marked for deletion") syncState.result = &reconcile.Result{} return syncState, nil } @@ -455,6 +458,12 @@ func (r *ReconcilerBase) syncDvPvcState(log logr.Logger, req reconcile.Request, return syncState, err } + syncState.usePopulator, err = r.shouldUseCDIPopulator(&syncState) + if err != nil { + return syncState, err + } + updateDataVolumeUseCDIPopulator(&syncState) + if err := r.handleStaticVolume(&syncState, log); err != nil || syncState.result != nil { return syncState, err } @@ -679,6 +688,19 @@ func (r *ReconcilerBase) reconcileProgressUpdate(datavolume *cdiv1.DataVolume, p datavolume.Status.Progress = "N/A" } + if !r.shouldUpdateProgress { + return nil + } + + if usePopulator, _ := CheckPVCUsingPopulators(pvc); usePopulator { + if progress, ok := pvc.Annotations[cc.AnnPopulatorProgress]; ok { + datavolume.Status.Progress = cdiv1.DataVolumeProgress(progress) + } else { + datavolume.Status.Progress = "N/A" + } + return nil + } + if datavolume.Spec.Source != nil && datavolume.Spec.Source.PVC != nil { podNamespace = datavolume.Spec.Source.PVC.Namespace } else { @@ -792,15 +814,9 @@ func (r ReconcilerBase) updateStatus(req reconcile.Request, phaseSync *statusPha } else { switch pvc.Status.Phase { case corev1.ClaimPending: - shouldBeMarkedWaitForFirstConsumer, err := r.shouldBeMarkedWaitForFirstConsumer(pvc) - if err != nil { + if err := r.updateStatusPVCPending(pvc, dvc, dataVolumeCopy, &event); err != nil { return reconcile.Result{}, err } - if shouldBeMarkedWaitForFirstConsumer { - dataVolumeCopy.Status.Phase = cdiv1.WaitForFirstConsumer - } else { - dataVolumeCopy.Status.Phase = cdiv1.Pending - } case corev1.ClaimBound: switch dataVolumeCopy.Status.Phase { case cdiv1.Pending: @@ -844,6 +860,38 @@ func (r ReconcilerBase) 
updateStatus(req reconcile.Request, phaseSync *statusPha return result, r.emitEvent(dv, dataVolumeCopy, curPhase, currentCond, &event) } +func (r ReconcilerBase) updateStatusPVCPending(pvc *corev1.PersistentVolumeClaim, dvc dvController, dataVolumeCopy *cdiv1.DataVolume, event *Event) error { + usePopulator, err := CheckPVCUsingPopulators(pvc) + if err != nil { + return err + } + if usePopulator { + // when using populators the target pvc phase will stay pending until the population completes, + // hence if not wffc we should update the dv phase according to the pod phase + shouldBeMarkedPendingPopulation, err := r.shouldBeMarkedPendingPopulation(pvc) + if err != nil { + return err + } + if shouldBeMarkedPendingPopulation { + dataVolumeCopy.Status.Phase = cdiv1.PendingPopulation + } else if err := dvc.updateStatusPhase(pvc, dataVolumeCopy, event); err != nil { + return err + } + return nil + } + + shouldBeMarkedWaitForFirstConsumer, err := r.shouldBeMarkedWaitForFirstConsumer(pvc) + if err != nil { + return err + } + if shouldBeMarkedWaitForFirstConsumer { + dataVolumeCopy.Status.Phase = cdiv1.WaitForFirstConsumer + } else { + dataVolumeCopy.Status.Phase = cdiv1.Pending + } + return nil +} + func (r *ReconcilerBase) updateConditions(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim, reason, message string) { var anno map[string]string @@ -893,8 +941,9 @@ func (r *ReconcilerBase) emitFailureConditionEvent(dataVolume *cdiv1.DataVolume, if curReady == nil || curBound == nil || curRunning == nil { return } - if curReady.Status == corev1.ConditionFalse && curRunning.Status == corev1.ConditionFalse && curBound.Status == corev1.ConditionTrue { - //Bound, not ready, and not running + if curReady.Status == corev1.ConditionFalse && curRunning.Status == corev1.ConditionFalse && + dvBoundOrPopulationInProgress(dataVolume, curBound) { + //Bound or in progress, not ready, and not running if curRunning.Message != "" && orgRunning.Message != curRunning.Message { r.recorder.Event(dataVolume, corev1.EventTypeWarning, curRunning.Reason, curRunning.Message) } @@ -1066,9 +1115,19 @@ func newLongTermCloneTokenGenerator(key *rsa.PrivateKey) token.Generator { return token.NewGenerator(common.ExtendedCloneTokenIssuer, key, 10*365*24*time.Hour) } +// storageClassWaitForFirstConsumer returns if the binding mode of a given storage class is WFFC +func (r *ReconcilerBase) storageClassWaitForFirstConsumer(storageClass *string) (bool, error) { + storageClassBindingMode, err := r.getStorageClassBindingMode(storageClass) + if err != nil { + return false, err + } + + return storageClassBindingMode != nil && *storageClassBindingMode == storagev1.VolumeBindingWaitForFirstConsumer, nil +} + // shouldBeMarkedWaitForFirstConsumer decided whether we should mark DV as WFFC func (r *ReconcilerBase) shouldBeMarkedWaitForFirstConsumer(pvc *corev1.PersistentVolumeClaim) (bool, error) { - storageClassBindingMode, err := r.getStorageClassBindingMode(pvc.Spec.StorageClassName) + wffc, err := r.storageClassWaitForFirstConsumer(pvc.Spec.StorageClassName) if err != nil { return false, err } @@ -1078,13 +1137,23 @@ func (r *ReconcilerBase) shouldBeMarkedWaitForFirstConsumer(pvc *corev1.Persiste return false, err } - res := honorWaitForFirstConsumerEnabled && - storageClassBindingMode != nil && *storageClassBindingMode == storagev1.VolumeBindingWaitForFirstConsumer && + res := honorWaitForFirstConsumerEnabled && wffc && pvc.Status.Phase == corev1.ClaimPending return res, nil } +// shouldBeMarkedPendingPopulation decides whether 
we should mark DV as PendingPopulation +func (r *ReconcilerBase) shouldBeMarkedPendingPopulation(pvc *corev1.PersistentVolumeClaim) (bool, error) { + wffc, err := r.storageClassWaitForFirstConsumer(pvc.Spec.StorageClassName) + if err != nil { + return false, err + } + nodeName := pvc.Annotations[cc.AnnSelectedNode] + + return wffc && nodeName == "", nil +} + // handlePvcCreation works as a wrapper for non-clone PVC creation and error handling func (r *ReconcilerBase) handlePvcCreation(log logr.Logger, syncState *dvSyncState, pvcModifier pvcModifierFunc) error { if syncState.pvc != nil { @@ -1109,24 +1178,67 @@ func (r *ReconcilerBase) handlePvcCreation(log logr.Logger, syncState *dvSyncSta return nil } -// storageClassCSIDriverExists returns true if the passed storage class has CSI drivers available -func (r *ReconcilerBase) storageClassCSIDriverExists(storageClassName *string) (bool, error) { - log := r.log.WithName("getCsiDriverForStorageClass").V(3) +// shouldUseCDIPopulator returns whether the population of the PVC should be done using +// CDI populators. +// Currently it will use populators only if: +// * no podRetainAfterCompletion or immediateBinding annotations +// * source is not VDDK, Imageio, PVC, Snapshot +// * storageClass bindingMode is not wffc while honorWaitForFirstConsumer feature gate is disabled +// * storageClass used is CSI storageClass +func (r *ReconcilerBase) shouldUseCDIPopulator(syncState *dvSyncState) (bool, error) { + dv := syncState.dvMutated + if usePopulator, ok := dv.Annotations[cc.AnnUsePopulator]; ok { + boolUsePopulator, err := strconv.ParseBool(usePopulator) + if err != nil { + return false, err + } + return boolUsePopulator, nil + } + log := r.log.WithValues("DataVolume", dv.Name, "Namespace", dv.Namespace) + // currently populators don't support the retain pod annotation so don't use populators in that case + if retain := dv.Annotations[cc.AnnPodRetainAfterCompletion]; retain == "true" { + log.Info("Not using CDI populators, currently we don't support populators with retainAfterCompletion annotation") + return false, nil + } + // currently populators don't support the immediate bind annotation so don't use populators in that case + if forceBind := dv.Annotations[cc.AnnImmediateBinding]; forceBind == "true" { + log.Info("Not using CDI populators, currently we don't support populators with immediate bind annotation") + return false, nil + } + // currently we don't support populators with an import source of VDDK or Imageio, + // or cloning from either PVC or snapshot + if dv.Spec.Source.Imageio != nil || dv.Spec.Source.VDDK != nil || + dv.Spec.Source.PVC != nil || dv.Spec.Source.Snapshot != nil { + log.Info("Not using CDI populators, currently we don't support populators with Imageio/VDDK/Clone source") + return false, nil + } + + wffc, err := r.storageClassWaitForFirstConsumer(syncState.pvcSpec.StorageClassName) + if err != nil { + return false, err + } - storageClass, err := cc.GetStorageClassByName(context.TODO(), r.client, storageClassName) + honorWaitForFirstConsumerEnabled, err := r.featureGates.HonorWaitForFirstConsumerEnabled() if err != nil { return false, err } - if storageClass == nil { - log.Info("Target PVC's Storage Class not found") + // currently we don't support force bind in populators, so when the storage class + // is wffc but the honorWaitForFirstConsumer feature gate is disabled we can't + // do an immediate bind anyway; fall back to the regular flow instead + if wffc && !honorWaitForFirstConsumerEnabled { + log.Info("Not using CDI populators, currently we 
don't support WFFC storage when the honorWaitForFirstConsumer feature gate is disabled") return false, nil } - csiDriver := &storagev1.CSIDriver{} - - if err := r.client.Get(context.TODO(), types.NamespacedName{Name: storageClass.Provisioner}, csiDriver); err != nil { + usePopulator, err := storageClassCSIDriverExists(r.client, r.log, syncState.pvcSpec.StorageClassName) + if err != nil { return false, err } + if !usePopulator { + if syncState.pvcSpec.StorageClassName != nil { + log.Info("Not using CDI populators, storage class is not a CSI storage", "storageClass", *syncState.pvcSpec.StorageClassName) + } + } - return true, nil + return usePopulator, nil } diff --git a/pkg/controller/datavolume/external-population-controller.go b/pkg/controller/datavolume/external-population-controller.go index 79f6f7511a..fdcddf8be4 100644 --- a/pkg/controller/datavolume/external-population-controller.go +++ b/pkg/controller/datavolume/external-population-controller.go @@ -61,12 +61,13 @@ func NewPopulatorController(ctx context.Context, mgr manager.Manager, log logr.L client := mgr.GetClient() reconciler := &PopulatorReconciler{ ReconcilerBase: ReconcilerBase{ - client: client, - scheme: mgr.GetScheme(), - log: log.WithName(populatorControllerName), - recorder: mgr.GetEventRecorderFor(populatorControllerName), - featureGates: featuregates.NewFeatureGates(client), - installerLabels: installerLabels, + client: client, + scheme: mgr.GetScheme(), + log: log.WithName(populatorControllerName), + recorder: mgr.GetEventRecorderFor(populatorControllerName), + featureGates: featuregates.NewFeatureGates(client), + installerLabels: installerLabels, + shouldUpdateProgress: false, }, } @@ -100,7 +101,7 @@ func (r *PopulatorReconciler) prepare(syncState *dvSyncState) error { // checkPopulationRequirements returns true if the PVC meets the requirements to be populated func (r *PopulatorReconciler) checkPopulationRequirements(pvc *corev1.PersistentVolumeClaim, dv *cdiv1.DataVolume, event *Event) (bool, error) { - csiDriverAvailable, err := r.storageClassCSIDriverExists(pvc.Spec.StorageClassName) + csiDriverAvailable, err := storageClassCSIDriverExists(r.client, r.log, pvc.Spec.StorageClassName) if err != nil && !k8serrors.IsNotFound(err) { return false, err } diff --git a/pkg/controller/datavolume/import-controller.go b/pkg/controller/datavolume/import-controller.go index b2af50fa9c..7d3d0f727d 100644 --- a/pkg/controller/datavolume/import-controller.go +++ b/pkg/controller/datavolume/import-controller.go @@ -26,14 +26,20 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" + "kubevirt.io/containerized-data-importer/pkg/controller/populators" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) const ( @@ -60,6 +66,8 @@ const ( MessageImportPaused = "Multistage import into PVC %s is paused" importControllerName = "datavolume-import-controller" + + volumeImportSourcePrefix = "volume-import-source" ) // ImportReconciler 
members @@ -77,12 +85,13 @@ func NewImportController( client := mgr.GetClient() reconciler := &ImportReconciler{ ReconcilerBase: ReconcilerBase{ - client: client, - scheme: mgr.GetScheme(), - log: log.WithName(importControllerName), - recorder: mgr.GetEventRecorderFor(importControllerName), - featureGates: featuregates.NewFeatureGates(client), - installerLabels: installerLabels, + client: client, + scheme: mgr.GetScheme(), + log: log.WithName(importControllerName), + recorder: mgr.GetEventRecorderFor(importControllerName), + featureGates: featuregates.NewFeatureGates(client), + installerLabels: installerLabels, + shouldUpdateProgress: true, }, } @@ -103,6 +112,31 @@ func addDataVolumeImportControllerWatches(mgr manager.Manager, datavolumeControl if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumeImport); err != nil { return err } + if err := datavolumeController.Watch(&source.Kind{Type: &cdiv1.VolumeImportSource{}}, &handler.EnqueueRequestForOwner{ + OwnerType: &cdiv1.DataVolume{}, + IsController: true, + }); err != nil { + return err + } + return nil +} + +func (r *ImportReconciler) updatePVCForPopulation(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { + if dataVolume.Spec.Source.HTTP == nil && + dataVolume.Spec.Source.S3 == nil && + dataVolume.Spec.Source.GCS == nil && + dataVolume.Spec.Source.Registry == nil && + dataVolume.Spec.Source.Imageio == nil && + dataVolume.Spec.Source.VDDK == nil && + dataVolume.Spec.Source.Blank == nil { + return errors.Errorf("no source set for import datavolume") + } + apiGroup := cc.AnnAPIGroup + pvc.Spec.DataSourceRef = &corev1.TypedObjectReference{ + APIGroup: &apiGroup, + Kind: cdiv1.VolumeImportSourceRef, + Name: volumeImportSourceName(dataVolume), + } return nil } @@ -160,13 +194,26 @@ func (r *ImportReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncR } func (r *ImportReconciler) syncImport(log logr.Logger, req reconcile.Request) (dvSyncState, error) { - syncState, syncErr := r.syncCommon(log, req, nil, nil) + syncState, syncErr := r.syncCommon(log, req, r.cleanup, nil) if syncErr != nil || syncState.result != nil { return syncState, syncErr } - if err := r.handlePvcCreation(log, &syncState, r.updateAnnotations); err != nil { + + pvcModifier := r.updateAnnotations + if syncState.usePopulator { + if syncState.dvMutated.Status.Phase != cdiv1.Succeeded { + err := r.createVolumeImportSourceCR(&syncState) + if err != nil { + return syncState, err + } + } + pvcModifier = r.updatePVCForPopulation + } + + if err := r.handlePvcCreation(log, &syncState, pvcModifier); err != nil { syncErr = err } + if syncState.pvc != nil && syncErr == nil { r.setVddkAnnotations(&syncState) syncErr = r.maybeSetPvcMultiStageAnnotation(syncState.pvc, syncState.dvMutated) @@ -174,15 +221,36 @@ func (r *ImportReconciler) syncImport(log logr.Logger, req reconcile.Request) (d return syncState, syncErr } +func (r *ImportReconciler) cleanup(syncState *dvSyncState) error { + dv := syncState.dvMutated + // The cleanup is to delete the volumeImportSourceCR which is used only with populators, + // it is owned by the DV so it will be deleted when the dv is deleted; + // we can also delete it once the dv has succeeded + usePopulator, err := checkDVUsingPopulators(syncState.dvMutated) + if err != nil { + return err + } + if usePopulator && dv.Status.Phase == cdiv1.Succeeded { + return r.deleteVolumeImportSourceCR(syncState) + } + + return nil +} + +func isPVCImportPopulation(pvc *corev1.PersistentVolumeClaim) bool { + return 
populators.IsPVCDataSourceRefKind(pvc, cdiv1.VolumeImportSourceRef) +} + func (r *ImportReconciler) updateStatusPhase(pvc *corev1.PersistentVolumeClaim, dataVolumeCopy *cdiv1.DataVolume, event *Event) error { phase, ok := pvc.Annotations[cc.AnnPodPhase] - if phase != string(corev1.PodSucceeded) { + importPopulation := isPVCImportPopulation(pvc) + if phase != string(corev1.PodSucceeded) && !importPopulation { _, ok := pvc.Annotations[cc.AnnImportPod] if !ok || pvc.Status.Phase != corev1.ClaimBound || pvcIsPopulated(pvc, dataVolumeCopy) { return nil } - dataVolumeCopy.Status.Phase = cdiv1.ImportScheduled } + dataVolumeCopy.Status.Phase = cdiv1.ImportScheduled if !ok { return nil } @@ -434,3 +502,78 @@ func (r *ImportReconciler) getNextCheckpoint(dataVolume *cdiv1.DataVolume, pvc * return nil } + +func volumeImportSourceName(dv *cdiv1.DataVolume) string { + return fmt.Sprintf("%s-%s", volumeImportSourcePrefix, dv.UID) +} + +func (r *ImportReconciler) createVolumeImportSourceCR(syncState *dvSyncState) error { + dv := syncState.dvMutated + importSource := &cdiv1.VolumeImportSource{} + importSourceName := volumeImportSourceName(dv) + + // check if import source already exists + if exists, err := cc.GetResource(context.TODO(), r.client, dv.Namespace, importSourceName, importSource); err != nil || exists { + return err + } + + source := &cdiv1.ImportSourceType{} + if http := dv.Spec.Source.HTTP; http != nil { + source.HTTP = http + } else if s3 := dv.Spec.Source.S3; s3 != nil { + source.S3 = s3 + } else if gcs := dv.Spec.Source.GCS; gcs != nil { + source.GCS = gcs + } else if registry := dv.Spec.Source.Registry; registry != nil { + source.Registry = registry + } else if imageio := dv.Spec.Source.Imageio; imageio != nil { + source.Imageio = imageio + } else if vddk := dv.Spec.Source.VDDK; vddk != nil { + source.VDDK = vddk + } else { + // Our dv shouldn't be without source + // Defaulting to Blank source + source.Blank = &cdiv1.DataVolumeBlankImage{} + } + + importSource = &cdiv1.VolumeImportSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: importSourceName, + Namespace: dv.Namespace, + }, + Spec: cdiv1.VolumeImportSourceSpec{ + Source: source, + ContentType: dv.Spec.ContentType, + Preallocation: dv.Spec.Preallocation, + }, + } + + if err := controllerutil.SetControllerReference(dv, importSource, r.scheme); err != nil { + return err + } + + if err := r.client.Create(context.TODO(), importSource); err != nil { + if !k8serrors.IsAlreadyExists(err) { + return err + } + } + return nil +} + +func (r *ImportReconciler) deleteVolumeImportSourceCR(syncState *dvSyncState) error { + importSourceName := volumeImportSourceName(syncState.dvMutated) + importSource := &cdiv1.VolumeImportSource{} + if err := r.client.Get(context.TODO(), types.NamespacedName{Name: importSourceName, Namespace: syncState.dvMutated.Namespace}, importSource); err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } else { + if err := r.client.Delete(context.TODO(), importSource); err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + } + + return nil +} diff --git a/pkg/controller/datavolume/import-controller_test.go b/pkg/controller/datavolume/import-controller_test.go index 3232f36aa8..e3101d1e70 100644 --- a/pkg/controller/datavolume/import-controller_test.go +++ b/pkg/controller/datavolume/import-controller_test.go @@ -33,6 +33,7 @@ import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" 
k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -86,6 +87,73 @@ var _ = Describe("All DataVolume Tests", func() { } }) + It("Should create volumeImportSource if should use cdi populator", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := NewImportDataVolume("test-dv") + dv.Spec.ContentType = cdiv1.DataVolumeArchive + preallocation := true + dv.Spec.Preallocation = &preallocation + reconciler = createImportReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + + importSource := &cdiv1.VolumeImportSource{} + importSourceName := volumeImportSourceName(dv) + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: importSourceName, Namespace: metav1.NamespaceDefault}, importSource) + Expect(err).ToNot(HaveOccurred()) + Expect(importSource.Spec.Source).ToNot(BeNil()) + Expect(importSource.Spec.ContentType).To(Equal(dv.Spec.ContentType)) + Expect(importSource.Spec.Preallocation).To(Equal(dv.Spec.Preallocation)) + Expect(importSource.OwnerReferences).To(HaveLen(1)) + or := importSource.OwnerReferences[0] + Expect(or.UID).To(Equal(dv.UID)) + }) + + It("Should delete volumeImportSource if dv succeeded and we use cdi populator", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := NewImportDataVolume("test-dv") + importSourceName := volumeImportSourceName(dv) + importSource := &cdiv1.VolumeImportSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: importSourceName, + Namespace: dv.Namespace, + }, + } + reconciler = createImportReconciler(dv, sc, csiDriver, importSource) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + + dv.Status.Phase = cdiv1.Succeeded + err = reconciler.client.Update(context.TODO(), dv) + Expect(err).ToNot(HaveOccurred()) + + _, err = reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + + deletedImportSource := &cdiv1.VolumeImportSource{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: importSourceName, Namespace: dv.Namespace}, deletedImportSource) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + It("Should create a PVC on a valid import DV", func() { reconciler = 
createImportReconciler(NewImportDataVolume("test-dv")) _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) @@ -98,6 +166,86 @@ var _ = Describe("All DataVolume Tests", func() { Expect(pvc.Labels[common.KubePersistentVolumeFillingUpSuppressLabelKey]).To(Equal(common.KubePersistentVolumeFillingUpSuppressLabelValue)) }) + It("Should fail if dv source not import when use populators", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := NewImportDataVolume("test-dv") + dv.Spec.Source.HTTP = nil + reconciler = createImportReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no source set for import datavolume")) + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + + It("Should create a PVC with volumeImportSource when use populators", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := NewImportDataVolume("test-dv") + reconciler = createImportReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Name).To(Equal("test-dv")) + Expect(pvc.Labels[common.AppKubernetesPartOfLabel]).To(Equal("testing")) + Expect(pvc.Labels[common.KubePersistentVolumeFillingUpSuppressLabelKey]).To(Equal(common.KubePersistentVolumeFillingUpSuppressLabelValue)) + Expect(pvc.Spec.DataSourceRef).ToNot(BeNil()) + importSourceName := volumeImportSourceName(dv) + Expect(pvc.Spec.DataSourceRef.Name).To(Equal(importSourceName)) + Expect(pvc.Spec.DataSourceRef.Kind).To(Equal(cdiv1.VolumeImportSourceRef)) + Expect(pvc.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + }) + + It("Should report import population progress when use populators", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := NewImportDataVolume("test-dv") + reconciler = createImportReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + dv = &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", 
Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(string(dv.Status.Progress)).To(Equal("N/A")) + + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + + AddAnnotation(pvc, AnnPopulatorProgress, "13.45%") + err = reconciler.client.Update(context.TODO(), pvc) + Expect(err).ToNot(HaveOccurred()) + + _, err = reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + + dv = &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.Status.Progress).To(BeEquivalentTo("13.45%")) + }) + It("Should pass instancetype labels from DV to PVC", func() { dv := NewImportDataVolume("test-dv") dv.Labels = map[string]string{} @@ -968,6 +1116,101 @@ var _ = Describe("All DataVolume Tests", func() { } Expect(found).To(BeTrue()) }) + It("Should set DV phase to pendingPopulation if use populators with storage class WFFC", func() { + scName := "pvc_sc_wffc" + bindingMode := storagev1.VolumeBindingWaitForFirstConsumer + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + sc.VolumeBindingMode = &bindingMode + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + importDataVolume := NewImportDataVolume("test-dv") + importDataVolume.Spec.PVC.StorageClassName = &scName + + reconciler = createImportReconciler(sc, csiDriver, importDataVolume) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + dv := &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Name).To(Equal("test-dv")) + pvc.Status.Phase = corev1.ClaimPending + err = reconciler.client.Update(context.TODO(), pvc) + Expect(err).ToNot(HaveOccurred()) + _, err = reconciler.updateStatus(getReconcileRequest(dv), nil, reconciler) + Expect(err).ToNot(HaveOccurred()) + dv = &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.Status.Phase).To(Equal(cdiv1.PendingPopulation)) + + Expect(dv.Status.Conditions).To(HaveLen(3)) + boundCondition := FindConditionByType(cdiv1.DataVolumeBound, dv.Status.Conditions) + Expect(boundCondition.Status).To(Equal(corev1.ConditionFalse)) + Expect(boundCondition.Message).To(Equal("PVC test-dv Pending")) + By("Checking events recorded") + close(reconciler.recorder.(*record.FakeRecorder).Events) + found := false + for event := range reconciler.recorder.(*record.FakeRecorder).Events { + if strings.Contains(event, "PVC test-dv Pending") { + found = true + } + } + Expect(found).To(BeTrue()) + 
}) + + It("Should set DV phase to ImportScheduled if use populators wffc storage class after scheduled node", func() { + scName := "pvc_sc_wffc" + bindingMode := storagev1.VolumeBindingWaitForFirstConsumer + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + sc.VolumeBindingMode = &bindingMode + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + importDataVolume := NewImportDataVolume("test-dv") + importDataVolume.Spec.PVC.StorageClassName = &scName + + reconciler = createImportReconciler(sc, csiDriver, importDataVolume) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Name).To(Equal("test-dv")) + pvc.Status.Phase = corev1.ClaimPending + AddAnnotation(pvc, AnnSelectedNode, "node01") + err = reconciler.client.Update(context.TODO(), pvc) + Expect(err).ToNot(HaveOccurred()) + _, err = reconciler.updateStatus(getReconcileRequest(importDataVolume), nil, reconciler) + Expect(err).ToNot(HaveOccurred()) + dv := &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.Status.Phase).To(Equal(cdiv1.ImportScheduled)) + + Expect(dv.Status.Conditions).To(HaveLen(3)) + boundCondition := FindConditionByType(cdiv1.DataVolumeBound, dv.Status.Conditions) + Expect(boundCondition.Status).To(Equal(corev1.ConditionFalse)) + Expect(boundCondition.Message).To(Equal("PVC test-dv Pending")) + By("Checking events recorded") + close(reconciler.recorder.(*record.FakeRecorder).Events) + found := false + for event := range reconciler.recorder.(*record.FakeRecorder).Events { + if strings.Contains(event, "PVC test-dv Pending") { + found = true + } + } + Expect(found).To(BeTrue()) + }) It("Should switch to succeeded if PVC phase is pending, but pod phase is succeeded", func() { reconciler = createImportReconciler(NewImportDataVolume("test-dv")) @@ -1292,6 +1535,146 @@ var _ = Describe("All DataVolume Tests", func() { }) }) + var _ = Describe("shouldUseCDIPopulator", func() { + scName := "test" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{ + AnnDefaultStorageClass: "true", + }, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + + DescribeTable("Should return expected result if has annotation", func(annotation, value string, expected bool) { + httpSource := &cdiv1.DataVolumeSource{ + HTTP: &cdiv1.DataVolumeSourceHTTP{}, + } + storageSpec := &cdiv1.StorageSpec{} + dv := createDataVolumeWithStorageAPI("test-dv", metav1.NamespaceDefault, httpSource, storageSpec) + AddAnnotation(dv, annotation, value) + + reconciler = createImportReconciler() + syncState := dvSyncState{ + dvMutated: dv, + } + usePopulator, err := reconciler.shouldUseCDIPopulator(&syncState) + Expect(err).ToNot(HaveOccurred()) + Expect(usePopulator).To(Equal(expected)) + }, + Entry("AnnUsePopulator=true return true", AnnUsePopulator, "true", true), + Entry("AnnUsePopulator=false return false", AnnUsePopulator, "false", false), + 
Entry("AnnPodRetainAfterCompletion return false", AnnPodRetainAfterCompletion, "true", false), + Entry("AnnImmediateBinding return false", AnnImmediateBinding, "true", false), + ) + + DescribeTable("Should return false if source is", func(source *cdiv1.DataVolumeSource) { + storageSpec := &cdiv1.StorageSpec{} + dv := createDataVolumeWithStorageAPI("testDV", "testNamespace", source, storageSpec) + reconciler = createImportReconciler() + syncState := dvSyncState{ + dvMutated: dv, + } + usePopulator, err := reconciler.shouldUseCDIPopulator(&syncState) + Expect(err).ToNot(HaveOccurred()) + Expect(usePopulator).To(BeFalse()) + }, + Entry("Imageio", &cdiv1.DataVolumeSource{ + Imageio: &cdiv1.DataVolumeSourceImageIO{}, + }), + Entry("VDDK", &cdiv1.DataVolumeSource{ + VDDK: &cdiv1.DataVolumeSourceVDDK{}, + }), + Entry("PVC", &cdiv1.DataVolumeSource{ + PVC: &cdiv1.DataVolumeSourcePVC{}, + }), + Entry("Snapshot", &cdiv1.DataVolumeSource{ + Snapshot: &cdiv1.DataVolumeSourceSnapshot{}, + }), + ) + + It("Should return false if storage class has wffc bindingMode and honorWaitForFirstConsumer feature gate is disabled", func() { + sc := createStorageClassWithBindingMode(scName, + map[string]string{ + AnnDefaultStorageClass: "true", + }, + storagev1.VolumeBindingWaitForFirstConsumer) + httpSource := &cdiv1.DataVolumeSource{ + HTTP: &cdiv1.DataVolumeSourceHTTP{}, + } + storageSpec := &cdiv1.StorageSpec{} + dv := createDataVolumeWithStorageAPI("test-dv", metav1.NamespaceDefault, httpSource, storageSpec) + + reconciler = createImportReconcilerWFFCDisabled(sc) + syncState := dvSyncState{ + dvMutated: dv, + pvcSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + }, + } + usePopulator, err := reconciler.shouldUseCDIPopulator(&syncState) + Expect(err).ToNot(HaveOccurred()) + Expect(usePopulator).To(BeFalse()) + }) + + It("Should return false if storage class doesnt exist", func() { + httpSource := &cdiv1.DataVolumeSource{ + HTTP: &cdiv1.DataVolumeSourceHTTP{}, + } + storageSpec := &cdiv1.StorageSpec{} + dv := createDataVolumeWithStorageAPI("test-dv", metav1.NamespaceDefault, httpSource, storageSpec) + + reconciler = createImportReconciler() + syncState := dvSyncState{ + dvMutated: dv, + pvcSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + }, + } + usePopulator, err := reconciler.shouldUseCDIPopulator(&syncState) + Expect(err).ToNot(HaveOccurred()) + Expect(usePopulator).To(BeFalse()) + }) + + It("Should return false if storage class doesnt have csi driver", func() { + httpSource := &cdiv1.DataVolumeSource{ + HTTP: &cdiv1.DataVolumeSourceHTTP{}, + } + storageSpec := &cdiv1.StorageSpec{} + dv := createDataVolumeWithStorageAPI("test-dv", metav1.NamespaceDefault, httpSource, storageSpec) + + reconciler = createImportReconciler(sc) + syncState := dvSyncState{ + dvMutated: dv, + pvcSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + }, + } + usePopulator, err := reconciler.shouldUseCDIPopulator(&syncState) + Expect(err).ToNot(HaveOccurred()) + Expect(usePopulator).To(BeFalse()) + }) + + It("Should return true if storage class has csi driver", func() { + httpSource := &cdiv1.DataVolumeSource{ + HTTP: &cdiv1.DataVolumeSourceHTTP{}, + } + storageSpec := &cdiv1.StorageSpec{} + dv := createDataVolumeWithStorageAPI("test-dv", metav1.NamespaceDefault, httpSource, storageSpec) + + reconciler = createImportReconciler(sc, csiDriver) + syncState := dvSyncState{ + dvMutated: dv, + pvcSpec: &corev1.PersistentVolumeClaimSpec{ + StorageClassName: &scName, + }, + } + 
usePopulator, err := reconciler.shouldUseCDIPopulator(&syncState) + Expect(err).ToNot(HaveOccurred()) + Expect(usePopulator).To(BeTrue()) + }) + }) + }) func dvPhaseTest(reconciler ReconcilerBase, dvc dvController, testDv runtime.Object, current, expected cdiv1.DataVolumePhase, pvcPhase corev1.PersistentVolumeClaimPhase, podPhase corev1.PodPhase, ann, expectedEvent string, extraAnnotations ...string) { @@ -1391,6 +1774,20 @@ func readyStatusByPhase(phase cdiv1.DataVolumePhase) corev1.ConditionStatus { } } +func createImportReconcilerWFFCDisabled(objects ...runtime.Object) *ImportReconciler { + cdiConfig := MakeEmptyCDIConfigSpec(common.ConfigName) + cdiConfig.Status = cdiv1.CDIConfigStatus{ + ScratchSpaceStorageClass: testStorageClass, + } + cdiConfig.Spec.FeatureGates = []string{} + + objs := []runtime.Object{} + objs = append(objs, objects...) + objs = append(objs, cdiConfig) + + return createImportReconcilerWithoutConfig(objs...) +} + func createImportReconciler(objects ...runtime.Object) *ImportReconciler { cdiConfig := MakeEmptyCDIConfigSpec(common.ConfigName) cdiConfig.Status = cdiv1.CDIConfigStatus{ @@ -1441,6 +1838,7 @@ func createImportReconcilerWithoutConfig(objects ...runtime.Object) *ImportRecon common.AppKubernetesPartOfLabel: "testing", common.AppKubernetesVersionLabel: "v0.0.0-tests", }, + shouldUpdateProgress: true, }, } return r diff --git a/pkg/controller/datavolume/pvc-clone-controller.go b/pkg/controller/datavolume/pvc-clone-controller.go index efa50045fe..aa01eb5c58 100644 --- a/pkg/controller/datavolume/pvc-clone-controller.go +++ b/pkg/controller/datavolume/pvc-clone-controller.go @@ -96,12 +96,13 @@ func NewPvcCloneController( reconciler := &PvcCloneReconciler{ CloneReconcilerBase: CloneReconcilerBase{ ReconcilerBase: ReconcilerBase{ - client: client, - scheme: mgr.GetScheme(), - log: log.WithName(pvcCloneControllerName), - featureGates: featuregates.NewFeatureGates(client), - recorder: mgr.GetEventRecorderFor(pvcCloneControllerName), - installerLabels: installerLabels, + client: client, + scheme: mgr.GetScheme(), + log: log.WithName(pvcCloneControllerName), + featureGates: featuregates.NewFeatureGates(client), + recorder: mgr.GetEventRecorderFor(pvcCloneControllerName), + installerLabels: installerLabels, + shouldUpdateProgress: true, }, clonerImage: clonerImage, importerImage: importerImage, @@ -233,11 +234,6 @@ func (r *PvcCloneReconciler) prepare(syncState *dvSyncState) error { if err := r.populateSourceIfSourceRef(dv); err != nil { return err } - if dv.Status.Phase == cdiv1.Succeeded { - if err := r.cleanup(syncState); err != nil { - return err - } - } return nil } @@ -338,7 +334,7 @@ func (r *PvcCloneReconciler) syncClone(log logr.Logger, req reconcile.Request) ( return syncRes, err } if selectedCloneStrategy == CsiClone { - csiDriverAvailable, err := r.storageClassCSIDriverExists(pvcSpec.StorageClassName) + csiDriverAvailable, err := storageClassCSIDriverExists(r.client, r.log, pvcSpec.StorageClassName) if err != nil && !k8serrors.IsNotFound(err) { return syncRes, err } @@ -712,6 +708,12 @@ func (r *PvcCloneReconciler) sourceInUse(dv *cdiv1.DataVolume, eventReason strin func (r *PvcCloneReconciler) cleanup(syncState *dvSyncState) error { dv := syncState.dvMutated + + // This cleanup should be done if dv is marked for deletion or in case it succeeded + if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded { + return nil + } + r.log.V(3).Info("Cleanup initiated in dv PVC clone controller") if err := r.populateSourceIfSourceRef(dv); err != nil { 
diff --git a/pkg/controller/datavolume/pvc-clone-controller_test.go b/pkg/controller/datavolume/pvc-clone-controller_test.go index f1865ee330..995d51c94f 100644 --- a/pkg/controller/datavolume/pvc-clone-controller_test.go +++ b/pkg/controller/datavolume/pvc-clone-controller_test.go @@ -1000,6 +1000,7 @@ func createCloneReconcilerWithoutConfig(objects ...runtime.Object) *PvcCloneReco common.AppKubernetesPartOfLabel: "testing", common.AppKubernetesVersionLabel: "v0.0.0-tests", }, + shouldUpdateProgress: true, }, tokenValidator: &FakeValidator{Match: "foobar"}, tokenGenerator: &FakeGenerator{token: "foobar"}, diff --git a/pkg/controller/datavolume/snapshot-clone-controller.go b/pkg/controller/datavolume/snapshot-clone-controller.go index 3b7addb033..2197872336 100644 --- a/pkg/controller/datavolume/snapshot-clone-controller.go +++ b/pkg/controller/datavolume/snapshot-clone-controller.go @@ -68,12 +68,13 @@ func NewSnapshotCloneController( reconciler := &SnapshotCloneReconciler{ CloneReconcilerBase: CloneReconcilerBase{ ReconcilerBase: ReconcilerBase{ - client: client, - scheme: mgr.GetScheme(), - log: log.WithName(snapshotCloneControllerName), - featureGates: featuregates.NewFeatureGates(client), - recorder: mgr.GetEventRecorderFor(snapshotCloneControllerName), - installerLabels: installerLabels, + client: client, + scheme: mgr.GetScheme(), + log: log.WithName(snapshotCloneControllerName), + featureGates: featuregates.NewFeatureGates(client), + recorder: mgr.GetEventRecorderFor(snapshotCloneControllerName), + installerLabels: installerLabels, + shouldUpdateProgress: true, }, clonerImage: clonerImage, importerImage: importerImage, @@ -130,11 +131,6 @@ func (r *SnapshotCloneReconciler) prepare(syncState *dvSyncState) error { if err := r.populateSourceIfSourceRef(dv); err != nil { return err } - if dv.Status.Phase == cdiv1.Succeeded { - if err := r.cleanup(syncState); err != nil { - return err - } - } return nil } @@ -504,6 +500,12 @@ func getTempHostAssistedSourcePvcName(dv *cdiv1.DataVolume) string { func (r *SnapshotCloneReconciler) cleanup(syncState *dvSyncState) error { dv := syncState.dvMutated + + // This cleanup should be done if dv is marked for deletion or in case it succeeded + if dv.DeletionTimestamp == nil && dv.Status.Phase != cdiv1.Succeeded { + return nil + } + r.log.V(3).Info("Cleanup initiated in dv snapshot clone controller") if err := r.populateSourceIfSourceRef(dv); err != nil { diff --git a/pkg/controller/datavolume/snapshot-clone-controller_test.go b/pkg/controller/datavolume/snapshot-clone-controller_test.go index 9be6ca9dfd..5ad27d5e92 100644 --- a/pkg/controller/datavolume/snapshot-clone-controller_test.go +++ b/pkg/controller/datavolume/snapshot-clone-controller_test.go @@ -290,6 +290,7 @@ func createSnapshotCloneReconcilerWithoutConfig(objects ...runtime.Object) *Snap common.AppKubernetesPartOfLabel: "testing", common.AppKubernetesVersionLabel: "v0.0.0-tests", }, + shouldUpdateProgress: true, }, tokenValidator: &FakeValidator{Match: "foobar"}, tokenGenerator: &FakeGenerator{token: "foobar"}, diff --git a/pkg/controller/datavolume/upload-controller.go b/pkg/controller/datavolume/upload-controller.go index 27f12043f3..42da396567 100644 --- a/pkg/controller/datavolume/upload-controller.go +++ b/pkg/controller/datavolume/upload-controller.go @@ -23,12 +23,19 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/types" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" + "kubevirt.io/containerized-data-importer/pkg/controller/populators" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) const ( @@ -53,6 +60,8 @@ const ( MessageSizeDetectionPodFailed = "Size-detection pod failed due to %s" uploadControllerName = "datavolume-upload-controller" + + volumeUploadSourcePrefix = "volume-upload-source" ) // UploadReconciler members @@ -70,12 +79,13 @@ func NewUploadController( client := mgr.GetClient() reconciler := &UploadReconciler{ ReconcilerBase: ReconcilerBase{ - client: client, - scheme: mgr.GetScheme(), - log: log.WithName(uploadControllerName), - recorder: mgr.GetEventRecorderFor(uploadControllerName), - featureGates: featuregates.NewFeatureGates(client), - installerLabels: installerLabels, + client: client, + scheme: mgr.GetScheme(), + log: log.WithName(uploadControllerName), + recorder: mgr.GetEventRecorderFor(uploadControllerName), + featureGates: featuregates.NewFeatureGates(client), + installerLabels: installerLabels, + shouldUpdateProgress: false, }, } @@ -85,12 +95,39 @@ func NewUploadController( if err != nil { return nil, err } - if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumeUpload); err != nil { + if err := addDataVolumeUploadControllerWatches(mgr, datavolumeController); err != nil { return nil, err } + return datavolumeController, nil } +func addDataVolumeUploadControllerWatches(mgr manager.Manager, datavolumeController controller.Controller) error { + if err := addDataVolumeControllerCommonWatches(mgr, datavolumeController, dataVolumeUpload); err != nil { + return err + } + if err := datavolumeController.Watch(&source.Kind{Type: &cdiv1.VolumeUploadSource{}}, &handler.EnqueueRequestForOwner{ + OwnerType: &cdiv1.DataVolume{}, + IsController: true, + }); err != nil { + return err + } + return nil +} + +func (r *UploadReconciler) updatePVCForPopulation(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { + if dataVolume.Spec.Source.Upload == nil { + return errors.Errorf("no source set for upload datavolume") + } + apiGroup := cc.AnnAPIGroup + pvc.Spec.DataSourceRef = &corev1.TypedObjectReference{ + APIGroup: &apiGroup, + Kind: cdiv1.VolumeUploadSourceRef, + Name: volumeUploadSourceName(dataVolume), + } + return nil +} + func (r *UploadReconciler) updateAnnotations(dataVolume *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if dataVolume.Spec.Source.Upload == nil { return errors.Errorf("no source set for upload datavolume") @@ -113,25 +150,58 @@ func (r *UploadReconciler) sync(log logr.Logger, req reconcile.Request) (dvSyncR } func (r *UploadReconciler) syncUpload(log logr.Logger, req reconcile.Request) (dvSyncState, error) { - syncState, syncErr := r.syncCommon(log, req, nil, nil) + syncState, syncErr := r.syncCommon(log, req, r.cleanup, nil) if syncErr != nil || syncState.result != nil { return syncState, syncErr } - if err := r.handlePvcCreation(log, &syncState, r.updateAnnotations); err != nil { + + pvcModifier := r.updateAnnotations + if syncState.usePopulator { + if 
syncState.dvMutated.Status.Phase != cdiv1.Succeeded { + err := r.createVolumeUploadSourceCR(&syncState) + if err != nil { + return syncState, err + } + } + pvcModifier = r.updatePVCForPopulation + } + + if err := r.handlePvcCreation(log, &syncState, pvcModifier); err != nil { syncErr = err } return syncState, syncErr } +func (r *UploadReconciler) cleanup(syncState *dvSyncState) error { + dv := syncState.dvMutated + // The cleanup is to delete the volumeUploadSourceCR which is used only with populators, + // it is owned by the DV so it will be deleted when the dv is deleted + // we can also delete it once the dv has succeeded + usePopulator, err := checkDVUsingPopulators(syncState.dvMutated) + if err != nil { + return err + } + if usePopulator && dv.Status.Phase == cdiv1.Succeeded { + return r.deleteVolumeUploadSourceCR(syncState) + } + + return nil +} + +func isPVCUploadPopulation(pvc *corev1.PersistentVolumeClaim) bool { + return populators.IsPVCDataSourceRefKind(pvc, cdiv1.VolumeUploadSourceRef) +} + func (r *UploadReconciler) updateStatusPhase(pvc *corev1.PersistentVolumeClaim, dataVolumeCopy *cdiv1.DataVolume, event *Event) error { phase, ok := pvc.Annotations[cc.AnnPodPhase] - if phase != string(corev1.PodSucceeded) { + uploadPopulation := isPVCUploadPopulation(pvc) + if phase != string(corev1.PodSucceeded) && !uploadPopulation { _, ok = pvc.Annotations[cc.AnnUploadRequest] if !ok || pvc.Status.Phase != corev1.ClaimBound || pvcIsPopulated(pvc, dataVolumeCopy) { return nil } - dataVolumeCopy.Status.Phase = cdiv1.UploadScheduled } + dataVolumeCopy.Status.Phase = cdiv1.UploadScheduled if !ok { return nil } @@ -163,3 +233,55 @@ func (r *UploadReconciler) updateStatusPhase(pvc *corev1.PersistentVolumeClaim, } return nil } + +func volumeUploadSourceName(dv *cdiv1.DataVolume) string { + return fmt.Sprintf("%s-%s", volumeUploadSourcePrefix, dv.UID) +} + +func (r *UploadReconciler) createVolumeUploadSourceCR(syncState *dvSyncState) error { + uploadSourceName := volumeUploadSourceName(syncState.dvMutated) + uploadSource := &cdiv1.VolumeUploadSource{} + // check if uploadSource already exists + if exists, err := cc.GetResource(context.TODO(), r.client, syncState.dvMutated.Namespace, uploadSourceName, uploadSource); err != nil || exists { + return err + } + + uploadSource = &cdiv1.VolumeUploadSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: uploadSourceName, + Namespace: syncState.dv.Namespace, + }, + Spec: cdiv1.VolumeUploadSourceSpec{ + ContentType: syncState.dv.Spec.ContentType, + Preallocation: syncState.dv.Spec.Preallocation, + }, + } + + if err := controllerutil.SetControllerReference(syncState.dvMutated, uploadSource, r.scheme); err != nil { + return err + } + if err := r.client.Create(context.TODO(), uploadSource); err != nil { + if !k8serrors.IsAlreadyExists(err) { + return err + } + } + return nil +} + +func (r *UploadReconciler) deleteVolumeUploadSourceCR(syncState *dvSyncState) error { + uploadSourceName := volumeUploadSourceName(syncState.dvMutated) + uploadSource := &cdiv1.VolumeUploadSource{} + if err := r.client.Get(context.TODO(), types.NamespacedName{Name: uploadSourceName, Namespace: syncState.dvMutated.Namespace}, uploadSource); err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } else { + if err := r.client.Delete(context.TODO(), uploadSource); err != nil { + if !k8serrors.IsNotFound(err) { + return err + } + } + } + + return nil +} diff --git a/pkg/controller/datavolume/upload-controller_test.go b/pkg/controller/datavolume/upload-controller_test.go index
0ca1b6c7e3..6c958abd1f 100644 --- a/pkg/controller/datavolume/upload-controller_test.go +++ b/pkg/controller/datavolume/upload-controller_test.go @@ -17,13 +17,21 @@ limitations under the License. package datavolume import ( + "context" + "strings" + . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" @@ -32,6 +40,7 @@ import ( featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" "sigs.k8s.io/controller-runtime/pkg/client/fake" logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var ( @@ -48,6 +57,153 @@ var _ = Describe("All DataVolume Tests", func() { } }) + It("Should create volumeUploadSource if should use cdi populator", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := newUploadDataVolume("test-dv") + dv.Spec.ContentType = cdiv1.DataVolumeArchive + preallocation := true + dv.Spec.Preallocation = &preallocation + reconciler = createUploadReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + + uploadSource := &cdiv1.VolumeUploadSource{} + uploadSourceName := volumeUploadSourceName(dv) + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: uploadSourceName, Namespace: metav1.NamespaceDefault}, uploadSource) + Expect(err).ToNot(HaveOccurred()) + Expect(uploadSource.Spec.ContentType).To(Equal(dv.Spec.ContentType)) + Expect(uploadSource.Spec.Preallocation).To(Equal(dv.Spec.Preallocation)) + Expect(uploadSource.OwnerReferences).To(HaveLen(1)) + or := uploadSource.OwnerReferences[0] + Expect(or.UID).To(Equal(dv.UID)) + }) + + It("Should delete volumeUploadSource if dv succeeded and we use cdi populator", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := newUploadDataVolume("test-dv") + uploadSourceName := volumeUploadSourceName(dv) + uploadSource := &cdiv1.VolumeUploadSource{ + ObjectMeta: metav1.ObjectMeta{ + Name: uploadSourceName, + Namespace: dv.Namespace, + }, + } + reconciler = createUploadReconciler(dv, sc, csiDriver, uploadSource) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + 
Expect(err).ToNot(HaveOccurred()) + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + + dv.Status.Phase = cdiv1.Succeeded + err = reconciler.client.Update(context.TODO(), dv) + Expect(err).ToNot(HaveOccurred()) + + _, err = reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + + deletedUploadSource := &cdiv1.VolumeUploadSource{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: uploadSourceName, Namespace: dv.Namespace}, deletedUploadSource) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + + It("Should fail if dv source not upload when use populators", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := newUploadDataVolume("test-dv") + dv.Spec.Source.Upload = nil + reconciler = createUploadReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no source set for upload datavolume")) + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).To(HaveOccurred()) + Expect(errors.IsNotFound(err)).To(BeTrue()) + }) + + It("Should create a PVC with volumeUploadSource when use populators", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := newUploadDataVolume("test-dv") + reconciler = createUploadReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Name).To(Equal("test-dv")) + Expect(pvc.Labels[common.AppKubernetesPartOfLabel]).To(Equal("testing")) + Expect(pvc.Labels[common.KubePersistentVolumeFillingUpSuppressLabelKey]).To(Equal(common.KubePersistentVolumeFillingUpSuppressLabelValue)) + Expect(pvc.Spec.DataSourceRef).ToNot(BeNil()) + uploadSourceName := volumeUploadSourceName(dv) + Expect(pvc.Spec.DataSourceRef.Name).To(Equal(uploadSourceName)) + Expect(pvc.Spec.DataSourceRef.Kind).To(Equal(cdiv1.VolumeUploadSourceRef)) + }) + + It("Should always report NA progress for upload population", func() { + scName := "testSC" + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + dv := newUploadDataVolume("test-dv") + 
reconciler = createUploadReconciler(dv, sc, csiDriver) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + dv = &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(string(dv.Status.Progress)).To(Equal("N/A")) + + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.GetAnnotations()[AnnUsePopulator]).To(Equal("true")) + + // updating the annotation to make sure we don't update the progress + // in reality the annotation should never be on upload PVCs + AddAnnotation(pvc, AnnPopulatorProgress, "13.45%") + err = reconciler.client.Update(context.TODO(), pvc) + Expect(err).ToNot(HaveOccurred()) + + _, err = reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + + dv = &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(string(dv.Status.Progress)).To(Equal("N/A")) + }) + var _ = Describe("Reconcile Datavolume status", func() { DescribeTable("DV phase", func(testDv runtime.Object, current, expected cdiv1.DataVolumePhase, pvcPhase corev1.PersistentVolumeClaimPhase, podPhase corev1.PodPhase, ann, expectedEvent string, extraAnnotations ...string) { scName := "testpvc" @@ -63,6 +219,53 @@ var _ = Describe("All DataVolume Tests", func() { Entry("should switch to failed on claim lost for upload", newUploadDataVolume("test-dv"), cdiv1.Pending, cdiv1.Failed, corev1.ClaimLost, corev1.PodFailed, AnnUploadRequest, "PVC test-dv lost", AnnPriorityClassName, "p0-upload"), Entry("should switch to succeeded for upload", newUploadDataVolume("test-dv"), cdiv1.Pending, cdiv1.Succeeded, corev1.ClaimBound, corev1.PodSucceeded, AnnUploadRequest, "Successfully uploaded into test-dv", AnnPriorityClassName, "p0-upload"), ) + + It("Should set DV phase to UploadScheduled if using populators with wffc storage class after scheduled node", func() { + scName := "pvc_sc_wffc" + bindingMode := storagev1.VolumeBindingWaitForFirstConsumer + sc := CreateStorageClassWithProvisioner(scName, map[string]string{AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + sc.VolumeBindingMode = &bindingMode + csiDriver := &storagev1.CSIDriver{ + ObjectMeta: metav1.ObjectMeta{ + Name: "csi-plugin", + }, + } + uploadDataVolume := newUploadDataVolume("test-dv") + uploadDataVolume.Spec.PVC.StorageClassName = &scName + + reconciler = createUploadReconciler(sc, csiDriver, uploadDataVolume) + _, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + + pvc := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Name).To(Equal("test-dv")) + pvc.Status.Phase = corev1.ClaimPending + AddAnnotation(pvc, AnnSelectedNode, "node01") + err =
reconciler.client.Update(context.TODO(), pvc) + Expect(err).ToNot(HaveOccurred()) + _, err = reconciler.updateStatus(getReconcileRequest(uploadDataVolume), nil, reconciler) + Expect(err).ToNot(HaveOccurred()) + dv := &cdiv1.DataVolume{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: "test-dv", Namespace: metav1.NamespaceDefault}, dv) + Expect(err).ToNot(HaveOccurred()) + Expect(dv.Status.Phase).To(Equal(cdiv1.UploadScheduled)) + + Expect(dv.Status.Conditions).To(HaveLen(3)) + boundCondition := FindConditionByType(cdiv1.DataVolumeBound, dv.Status.Conditions) + Expect(boundCondition.Status).To(Equal(corev1.ConditionFalse)) + Expect(boundCondition.Message).To(Equal("PVC test-dv Pending")) + By("Checking events recorded") + close(reconciler.recorder.(*record.FakeRecorder).Events) + found := false + for event := range reconciler.recorder.(*record.FakeRecorder).Events { + if strings.Contains(event, "PVC test-dv Pending") { + found = true + } + } + Expect(found).To(BeTrue()) + }) }) }) diff --git a/pkg/controller/datavolume/util.go b/pkg/controller/datavolume/util.go index e44c9a534b..f1772fbbf6 100644 --- a/pkg/controller/datavolume/util.go +++ b/pkg/controller/datavolume/util.go @@ -19,12 +19,14 @@ package datavolume import ( "context" "fmt" + "strconv" "github.com/go-logr/logr" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -303,6 +305,73 @@ func resolveVolumeSize(c client.Client, dvSpec cdiv1.DataVolumeSpec, pvcSpec *v1 return &requestedSize, err } +// storageClassCSIDriverExists returns true if the passed storage class has CSI drivers available +func storageClassCSIDriverExists(client client.Client, log logr.Logger, storageClassName *string) (bool, error) { + log = log.WithName("storageClassCSIDriverExists").V(3) + + storageClass, err := cc.GetStorageClassByName(context.TODO(), client, storageClassName) + if err != nil { + return false, err + } + if storageClass == nil { + log.Info("Target PVC's Storage Class not found") + return false, nil + } + + csiDriver := &storagev1.CSIDriver{} + + if err := client.Get(context.TODO(), types.NamespacedName{Name: storageClass.Provisioner}, csiDriver); err != nil { + if !k8serrors.IsNotFound(err) { + return false, err + } + return false, nil + } + + return true, nil +} + +// CheckPVCUsingPopulators returns true if pvc has dataSourceRef and has +// the usePopulator annotation +func CheckPVCUsingPopulators(pvc *v1.PersistentVolumeClaim) (bool, error) { + if pvc.Spec.DataSourceRef == nil { + return false, nil + } + usePopulator, ok := pvc.Annotations[cc.AnnUsePopulator] + if !ok { + return false, nil + } + boolUsePopulator, err := strconv.ParseBool(usePopulator) + if err != nil { + return false, err + } + return boolUsePopulator, nil +} + +func updateDataVolumeUseCDIPopulator(syncState *dvSyncState) { + cc.AddAnnotation(syncState.dvMutated, cc.AnnUsePopulator, strconv.FormatBool(syncState.usePopulator)) +} + +func checkDVUsingPopulators(dv *cdiv1.DataVolume) (bool, error) { + usePopulator, ok := dv.Annotations[cc.AnnUsePopulator] + if !ok { + return false, nil + } + boolUsePopulator, err := strconv.ParseBool(usePopulator) + if err != nil { + return false, err + } + return boolUsePopulator, nil +} + +func dvBoundOrPopulationInProgress(dataVolume *cdiv1.DataVolume, boundCond *cdiv1.DataVolumeCondition) bool { + usePopulator, err := 
checkDVUsingPopulators(dataVolume) + if err != nil { + return false + } + return boundCond.Status == v1.ConditionTrue || + (usePopulator && dataVolume.Status.Phase != cdiv1.Pending && dataVolume.Status.Phase != cdiv1.PendingPopulation) +} + func createStorageProfile(name string, accessModes []v1.PersistentVolumeAccessMode, volumeMode v1.PersistentVolumeMode) *cdiv1.StorageProfile { diff --git a/pkg/controller/populators/clone-populator.go b/pkg/controller/populators/clone-populator.go index 73a1939bca..fc51a8b41c 100644 --- a/pkg/controller/populators/clone-populator.go +++ b/pkg/controller/populators/clone-populator.go @@ -317,7 +317,7 @@ func (r *ClonePopulatorReconciler) updateClonePhase(ctx context.Context, pvc *co delete(claimCpy.Annotations, AnnCloneError) cc.AddAnnotation(claimCpy, AnnClonePhase, phase) if progress != "" { - cc.AddAnnotation(claimCpy, AnnPopulatorProgress, progress) + cc.AddAnnotation(claimCpy, cc.AnnPopulatorProgress, progress) } if !apiequality.Semantic.DeepEqual(pvc, claimCpy) { diff --git a/pkg/controller/populators/clone-populator_test.go b/pkg/controller/populators/clone-populator_test.go index 25b39556ae..5973e693f2 100644 --- a/pkg/controller/populators/clone-populator_test.go +++ b/pkg/controller/populators/clone-populator_test.go @@ -279,7 +279,7 @@ var _ = Describe("Clone populator tests", func() { isDefaultResult(result, err) pvc := getTarget(reconciler.client) Expect(pvc.Annotations[AnnClonePhase]).To(Equal("phase2")) - Expect(pvc.Annotations[AnnPopulatorProgress]).To(Equal("50.0%")) + Expect(pvc.Annotations[cc.AnnPopulatorProgress]).To(Equal("50.0%")) }) It("should be in error phase if progress returns an error", func() { diff --git a/pkg/controller/populators/import-populator.go b/pkg/controller/populators/import-populator.go index 9bc99d7cf5..9ce7963fc5 100644 --- a/pkg/controller/populators/import-populator.go +++ b/pkg/controller/populators/import-populator.go @@ -121,23 +121,29 @@ func (r *ImportPopulatorReconciler) getPopulationSource(namespace, name string) // Import-specific implementation of reconcileTargetPVC func (r *ImportPopulatorReconciler) reconcileTargetPVC(pvc, pvcPrime *corev1.PersistentVolumeClaim) (reconcile.Result, error) { + pvcCopy := pvc.DeepCopy() phase := pvcPrime.Annotations[cc.AnnPodPhase] - if err := r.updateImportProgress(phase, pvc, pvcPrime); err != nil { - return reconcile.Result{}, err - } switch phase { case string(corev1.PodRunning): + err := r.updatePVCWithPVCPrimeAnnotations(pvcCopy, pvcPrime, r.updateImportAnnotations) // We requeue to keep reporting progress - return reconcile.Result{RequeueAfter: 2 * time.Second}, nil + return reconcile.Result{RequeueAfter: 2 * time.Second}, err case string(corev1.PodFailed): // We'll get called later once it succeeds - r.recorder.Eventf(pvcPrime, corev1.EventTypeWarning, importFailed, messageImportFailed, pvc.Name) + r.recorder.Eventf(pvc, corev1.EventTypeWarning, importFailed, messageImportFailed, pvc.Name) case string(corev1.PodSucceeded): // Once the import is succeeded, we rebind the PV from PVC' to target PVC if err := cc.Rebind(context.TODO(), r.client, pvcPrime, pvc); err != nil { return reconcile.Result{}, err } + } + + err := r.updatePVCWithPVCPrimeAnnotations(pvcCopy, pvcPrime, r.updateImportAnnotations) + if err != nil { + return reconcile.Result{}, err + } + if cc.IsPVCComplete(pvcPrime) { r.recorder.Eventf(pvc, corev1.EventTypeNormal, importSucceeded, messageImportSucceeded, pvc.Name) } @@ -181,21 +187,35 @@ func (r *ImportPopulatorReconciler) 
updatePVCForPopulation(pvc *corev1.Persisten annotations[cc.AnnSource] = cc.SourceNone } +func updateVddkAnnotations(pvc, pvcPrime *corev1.PersistentVolumeClaim) { + if cc.GetSource(pvcPrime) != cc.SourceVDDK { + return + } + if vddkHost := pvcPrime.Annotations[cc.AnnVddkHostConnection]; vddkHost != "" { + cc.AddAnnotation(pvc, cc.AnnVddkHostConnection, vddkHost) + } + if vddkVersion := pvcPrime.Annotations[cc.AnnVddkVersion]; vddkVersion != "" { + cc.AddAnnotation(pvc, cc.AnnVddkVersion, vddkVersion) + } +} + +func (r *ImportPopulatorReconciler) updateImportAnnotations(pvc, pvcPrime *corev1.PersistentVolumeClaim) { + phase := pvcPrime.Annotations[cc.AnnPodPhase] + if err := r.updateImportProgress(phase, pvc, pvcPrime); err != nil { + r.log.Error(err, "Failed to update import progress", "namespace", pvc.Namespace, "name", pvc.Name) + } + updateVddkAnnotations(pvc, pvcPrime) +} + // Progress reporting func (r *ImportPopulatorReconciler) updateImportProgress(podPhase string, pvc, pvcPrime *corev1.PersistentVolumeClaim) error { - if pvc.Annotations == nil { - pvc.Annotations = make(map[string]string) - } // Just set 100.0% if pod is succeeded if podPhase == string(corev1.PodSucceeded) { - pvc.Annotations[cc.AnnImportProgressReporting] = "100.0%" - if err := r.client.Update(context.TODO(), pvc); err != nil { - return err - } + cc.AddAnnotation(pvc, cc.AnnPopulatorProgress, "100.0%") + return nil } - importPod, err := r.getImportPod(pvc) + importPod, err := r.getImportPod(pvcPrime) if err != nil { return err } @@ -219,10 +239,7 @@ func (r *ImportPopulatorReconciler) updateImportProgress(podPhase string, pvc, p } if progressReport != "" { if f, err := strconv.ParseFloat(progressReport, 64); err == nil { - pvc.Annotations[cc.AnnImportProgressReporting] = fmt.Sprintf("%.2f%%", f) - if err := r.client.Update(context.TODO(), pvc); err != nil { - return err - } + cc.AddAnnotation(pvc, cc.AnnPopulatorProgress, fmt.Sprintf("%.2f%%", f)) } } diff --git a/pkg/controller/populators/import-populator_test.go b/pkg/controller/populators/import-populator_test.go index f19b23dfe0..4c95e5e48a 100644 --- a/pkg/controller/populators/import-populator_test.go +++ b/pkg/controller/populators/import-populator_test.go @@ -24,13 +24,16 @@ import ( "net/url" "strconv" "strings" + "time" . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/extensions/table" .
"github.com/onsi/gomega" snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" corev1 "k8s.io/api/core/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -258,6 +261,8 @@ var _ = Describe("Import populator tests", func() { pvcPrime, err := reconciler.getPVCPrime(targetPvc) Expect(err).ToNot(HaveOccurred()) Expect(pvcPrime).ToNot(BeNil()) + // make sure we didnt inflate size + Expect(pvcPrime.Spec.Resources.Requests[corev1.ResourceStorage]).To(Equal(resource.MustParse("1G"))) Expect(pvcPrime.GetAnnotations()).ToNot(BeNil()) Expect(pvcPrime.GetAnnotations()[AnnImmediateBinding]).To(Equal("")) Expect(pvcPrime.GetAnnotations()[AnnUploadRequest]).To(Equal("")) @@ -302,6 +307,57 @@ var _ = Describe("Import populator tests", func() { Expect(err).To(Not(HaveOccurred())) Expect(result).To(Not(BeNil())) }) + + table.DescribeTable("should update target pvc with desired annotations from pvc prime", func(podPhase string) { + targetPvc := CreatePvcInStorageClass(targetPvcName, metav1.NamespaceDefault, &sc.Name, nil, nil, corev1.ClaimPending) + targetPvc.Spec.DataSourceRef = dataSourceRef + volumeImportSource := getVolumeImportSource(true, metav1.NamespaceDefault) + pvcPrime := getPVCPrime(targetPvc, nil) + for _, ann := range desiredAnnotations { + AddAnnotation(pvcPrime, ann, "somevalue") + } + AddAnnotation(pvcPrime, AnnPodPhase, podPhase) + AddAnnotation(pvcPrime, "undesiredAnn", "somevalue") + + pv := &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Namespace: pvcPrime.Namespace, + Name: pvcPrime.Name, + }, + }, + } + pvcPrime.Spec.VolumeName = pv.Name + + By("Reconcile") + reconciler = createImportPopulatorReconciler(targetPvc, pvcPrime, pv, volumeImportSource, sc) + result, err := reconciler.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: targetPvcName, Namespace: metav1.NamespaceDefault}}) + Expect(err).To(Not(HaveOccurred())) + Expect(result).ToNot(BeNil()) + if podPhase == string(corev1.PodRunning) { + Expect(result.RequeueAfter).To(Equal(2 * time.Second)) + } else { + Expect(result.RequeueAfter).To(Equal(0 * time.Second)) + } + + updatedPVC := &corev1.PersistentVolumeClaim{} + err = reconciler.client.Get(context.TODO(), types.NamespacedName{Name: targetPvcName, Namespace: metav1.NamespaceDefault}, updatedPVC) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedPVC.GetAnnotations()).ToNot(BeNil()) + for _, ann := range desiredAnnotations { + _, ok := updatedPVC.Annotations[ann] + Expect(ok).To(BeTrue()) + } + _, ok := updatedPVC.Annotations["undesiredAnn"] + Expect(ok).To(BeFalse()) + }, + table.Entry("with pod running phase", string(corev1.PodRunning)), + table.Entry("with pod failed phase", string(corev1.PodFailed)), + table.Entry("with pod succeded phase", string(corev1.PodSucceeded)), + ) }) var _ = Describe("Import populator progress report", func() { @@ -312,15 +368,15 @@ var _ = Describe("Import populator tests", func() { reconciler = createImportPopulatorReconciler(targetPvc, pvcPrime, sc) err := reconciler.updateImportProgress(string(corev1.PodSucceeded), targetPvc, pvcPrime) Expect(err).To(Not(HaveOccurred())) - Expect(targetPvc.Annotations[AnnImportProgressReporting]).To(Equal("100.0%")) + 
Expect(targetPvc.Annotations[AnnPopulatorProgress]).To(Equal("100.0%")) }) It("should return error if no metrics in pod", func() { targetPvc := CreatePvcInStorageClass(targetPvcName, metav1.NamespaceDefault, &sc.Name, nil, nil, corev1.ClaimBound) - importPodName := fmt.Sprintf("%s-%s", common.ImporterPodName, targetPvc.Name) - targetPvc.Annotations = map[string]string{AnnImportPod: importPodName} pvcPrime := getPVCPrime(targetPvc, nil) - pod := CreateImporterTestPod(targetPvc, pvcPrime.Name, nil) + importPodName := fmt.Sprintf("%s-%s", common.ImporterPodName, pvcPrime.Name) + pvcPrime.Annotations = map[string]string{AnnImportPod: importPodName} + pod := CreateImporterTestPod(pvcPrime, pvcPrime.Name, nil) pod.Spec.Containers[0].Ports = nil pod.Status.Phase = corev1.PodRunning @@ -358,11 +414,11 @@ var _ = Describe("Import populator tests", func() { }) It("should report progress in target PVC if http endpoint returns matching data", func() { - targetPvc := CreatePvcInStorageClass(targetPvcName, metav1.NamespaceDefault, &sc.Name, nil, nil, corev1.ClaimBound) - importPodName := fmt.Sprintf("%s-%s", common.ImporterPodName, targetPvc.Name) - targetPvc.Annotations = map[string]string{AnnImportPod: importPodName} + targetPvc := CreatePvcInStorageClass(targetPvcName, metav1.NamespaceDefault, &sc.Name, nil, nil, corev1.ClaimPending) targetPvc.SetUID("b856691e-1038-11e9-a5ab-525500d15501") pvcPrime := getPVCPrime(targetPvc, nil) + importPodName := fmt.Sprintf("%s-%s", common.ImporterPodName, pvcPrime.Name) + pvcPrime.Annotations = map[string]string{AnnImportPod: importPodName} ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(fmt.Sprintf("import_progress{ownerUID=\"%v\"} 13.45", targetPvc.GetUID()))) @@ -374,7 +430,7 @@ var _ = Describe("Import populator tests", func() { port, err := strconv.Atoi(ep.Port()) Expect(err).ToNot(HaveOccurred()) - pod := CreateImporterTestPod(targetPvc, pvcPrime.Name, nil) + pod := CreateImporterTestPod(pvcPrime, pvcPrime.Name, nil) pod.Spec.Containers[0].Ports[0].ContainerPort = int32(port) pod.Status.PodIP = ep.Hostname() pod.Status.Phase = corev1.PodRunning @@ -382,7 +438,7 @@ var _ = Describe("Import populator tests", func() { reconciler = createImportPopulatorReconciler(targetPvc, pvcPrime, pod) err = reconciler.updateImportProgress(string(corev1.PodRunning), targetPvc, pvcPrime) Expect(err).ToNot(HaveOccurred()) - Expect(targetPvc.Annotations[AnnImportProgressReporting]).To(BeEquivalentTo("13.45%")) + Expect(targetPvc.Annotations[AnnPopulatorProgress]).To(BeEquivalentTo("13.45%")) }) }) }) diff --git a/pkg/controller/populators/populator-base.go b/pkg/controller/populators/populator-base.go index 1add179b7c..e75d1f81a5 100644 --- a/pkg/controller/populators/populator-base.go +++ b/pkg/controller/populators/populator-base.go @@ -18,6 +18,7 @@ package populators import ( "context" + "reflect" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" @@ -200,14 +201,6 @@ func (r *ReconcilerBase) createPVCPrime(pvc *corev1.PersistentVolumeClaim, sourc } util.SetRecommendedLabels(pvcPrime, r.installerLabels, "cdi-controller") - // disk or image size, inflate it with overhead - requestedSize := pvc.Spec.Resources.Requests[corev1.ResourceStorage] - requestedSize, err := cc.InflateSizeWithOverhead(context.TODO(), r.client, requestedSize.Value(), &pvc.Spec) - if err != nil { - return nil, err - } - pvcPrime.Spec.Resources.Requests[corev1.ResourceStorage] = requestedSize - // We use the populator-specific 
pvcModifierFunc to add required annotations if updatePVCForPopulation != nil { updatePVCForPopulation(pvcPrime, source) @@ -220,6 +213,37 @@ func (r *ReconcilerBase) createPVCPrime(pvc *corev1.PersistentVolumeClaim, sourc return pvcPrime, nil } +type updatePVCAnnotationsFunc func(pvc, pvcPrime *corev1.PersistentVolumeClaim) + +var desiredAnnotations = []string{cc.AnnPodPhase, cc.AnnPodReady, cc.AnnPodRestarts, + cc.AnnPreallocationRequested, cc.AnnPreallocationApplied, + cc.AnnRunningCondition, cc.AnnRunningConditionMessage, cc.AnnRunningConditionReason} + +func (r *ReconcilerBase) updatePVCWithPVCPrimeAnnotations(pvc, pvcPrime *corev1.PersistentVolumeClaim, updateFunc updatePVCAnnotationsFunc) error { + pvcCopy := pvc.DeepCopy() + for _, ann := range desiredAnnotations { + if value, ok := pvcPrime.GetAnnotations()[ann]; ok { + cc.AddAnnotation(pvcCopy, ann, value) + } else if _, ok := pvcCopy.GetAnnotations()[ann]; ok { + // if the desired Annotation was deleted from pvcPrime + // delete it also in the target pvc + delete(pvcCopy.Annotations, ann) + } + } + if updateFunc != nil { + updateFunc(pvcCopy, pvcPrime) + } + + if !reflect.DeepEqual(pvc.ObjectMeta, pvcCopy.ObjectMeta) { + err := r.client.Update(context.TODO(), pvcCopy) + if err != nil { + return err + } + } + + return nil +} + // reconcile functions func (r *ReconcilerBase) reconcile(req reconcile.Request, populator populatorController, log logr.Logger) (reconcile.Result, error) { @@ -240,7 +264,7 @@ func (r *ReconcilerBase) reconcile(req reconcile.Request, populator populatorCon } // Each populator reconciles the target PVC in a different way - if cc.IsUnbound(pvc) { + if cc.IsUnbound(pvc) || !cc.IsPVCComplete(pvc) { return populator.reconcileTargetPVC(pvc, pvcPrime) } diff --git a/pkg/controller/populators/upload-populator.go b/pkg/controller/populators/upload-populator.go index 9049443f53..27491bc312 100644 --- a/pkg/controller/populators/upload-populator.go +++ b/pkg/controller/populators/upload-populator.go @@ -122,10 +122,7 @@ func (r *UploadPopulatorReconciler) updatePVCPrimeNameAnnotation(pvc *corev1.Per return false, nil } - if pvc.Annotations == nil { - pvc.Annotations = make(map[string]string) - } - pvc.Annotations[AnnPVCPrimeName] = pvcPrimeName + cc.AddAnnotation(pvc, AnnPVCPrimeName, pvcPrimeName) if err := r.client.Update(context.TODO(), pvc); err != nil { return false, err } @@ -133,17 +130,18 @@ func (r *UploadPopulatorReconciler) updatePVCPrimeNameAnnotation(pvc *corev1.Per return true, nil } -func (r *UploadPopulatorReconciler) removePVCPrimeNameAnnotation(pvc *corev1.PersistentVolumeClaim) error { +func removePVCPrimeNameAnnotation(pvc *corev1.PersistentVolumeClaim) { if _, ok := pvc.Annotations[AnnPVCPrimeName]; !ok { - return nil + return } delete(pvc.Annotations, AnnPVCPrimeName) - return r.client.Update(context.TODO(), pvc) } func (r *UploadPopulatorReconciler) reconcileTargetPVC(pvc, pvcPrime *corev1.PersistentVolumeClaim) (reconcile.Result, error) { - updated, err := r.updatePVCPrimeNameAnnotation(pvc, pvcPrime.Name) + pvcCopy := pvc.DeepCopy() + + updated, err := r.updatePVCPrimeNameAnnotation(pvcCopy, pvcPrime.Name) if updated || err != nil { // wait for the annotation to be updated return reconcile.Result{}, err @@ -154,17 +152,21 @@ func (r *UploadPopulatorReconciler) reconcileTargetPVC(pvc, pvcPrime *corev1.Per switch phase { case string(corev1.PodFailed): // We'll get called later once it succeeds - r.recorder.Eventf(pvcPrime, corev1.EventTypeWarning, errUploadFailed, 
fmt.Sprintf(messageUploadFailed, pvc.Name)) + r.recorder.Eventf(pvc, corev1.EventTypeWarning, errUploadFailed, fmt.Sprintf(messageUploadFailed, pvc.Name)) case string(corev1.PodSucceeded): - err := r.removePVCPrimeNameAnnotation(pvc) - if err != nil { - return reconcile.Result{}, err - } + removePVCPrimeNameAnnotation(pvcCopy) // Once the upload is succeeded, we rebind the PV from PVC' to target PVC - if err := cc.Rebind(context.TODO(), r.client, pvcPrime, pvc); err != nil { + if err := cc.Rebind(context.TODO(), r.client, pvcPrime, pvcCopy); err != nil { return reconcile.Result{}, err } - r.recorder.Eventf(pvc, corev1.EventTypeNormal, uploadSucceeded, fmt.Sprintf(messageUploadSucceeded, pvc)) + } + + err = r.updatePVCWithPVCPrimeAnnotations(pvcCopy, pvcPrime, nil) + if err != nil { + return reconcile.Result{}, err + } + if cc.IsPVCComplete(pvcPrime) { + r.recorder.Eventf(pvc, corev1.EventTypeNormal, uploadSucceeded, fmt.Sprintf(messageUploadSucceeded, pvc.Name)) } return reconcile.Result{}, nil diff --git a/pkg/controller/populators/upload-populator_test.go b/pkg/controller/populators/upload-populator_test.go index eb08c4b7dc..8037758068 100644 --- a/pkg/controller/populators/upload-populator_test.go +++ b/pkg/controller/populators/upload-populator_test.go @@ -95,8 +95,7 @@ var _ = Describe("Datavolume controller reconcile loop", func() { It("should set event if upload pod failed", func() { pvc := newUploadPopulatorPVC("test-pvc") - pvc.Annotations = make(map[string]string) - pvc.Annotations[AnnPVCPrimeName] = PVCPrimeName(pvc) + cc.AddAnnotation(pvc, AnnPVCPrimeName, PVCPrimeName(pvc)) uploadPV := uploadPV(pvc) volumeUploadSourceCR := newUploadPopulatorCR("", false) @@ -108,7 +107,7 @@ var _ = Describe("Datavolume controller reconcile loop", func() { Expect(err).ToNot(HaveOccurred()) pvcPrime, err := r.getPVCPrime(pvc) Expect(err).ToNot(HaveOccurred()) - pvcPrime.Annotations[cc.AnnPodPhase] = string(corev1.PodFailed) + cc.AddAnnotation(pvcPrime, cc.AnnPodPhase, string(corev1.PodFailed)) pvcPrime.Spec.VolumeName = "test-pv" pvcPrime.UID = pvcPrimeUID err = r.client.Update(context.TODO(), pvcPrime) @@ -128,8 +127,7 @@ var _ = Describe("Datavolume controller reconcile loop", func() { It("should rebind PV to target PVC", func() { pvc := newUploadPopulatorPVC("test-pvc") - pvc.Annotations = make(map[string]string) - pvc.Annotations[AnnPVCPrimeName] = PVCPrimeName(pvc) + cc.AddAnnotation(pvc, AnnPVCPrimeName, PVCPrimeName(pvc)) uploadPV := uploadPV(pvc) volumeUploadSourceCR := newUploadPopulatorCR("", false) @@ -141,7 +139,8 @@ var _ = Describe("Datavolume controller reconcile loop", func() { Expect(err).ToNot(HaveOccurred()) pvcPrime, err := r.getPVCPrime(pvc) Expect(err).ToNot(HaveOccurred()) - pvcPrime.Annotations[cc.AnnPodPhase] = string(corev1.PodSucceeded) + + cc.AddAnnotation(pvcPrime, cc.AnnPodPhase, string(corev1.PodSucceeded)) pvcPrime.Spec.VolumeName = "test-pv" pvcPrime.UID = pvcPrimeUID err = r.client.Update(context.TODO(), pvcPrime) @@ -159,14 +158,15 @@ var _ = Describe("Datavolume controller reconcile loop", func() { updatedPVC := &corev1.PersistentVolumeClaim{} err = r.client.Get(context.TODO(), types.NamespacedName{Name: "test-pvc", Namespace: metav1.NamespaceDefault}, updatedPVC) Expect(err).ToNot(HaveOccurred()) - Expect(updatedPVC.GetAnnotations()).To(BeNil()) + Expect(updatedPVC.GetAnnotations()[AnnPVCPrimeName]).To(BeEmpty()) expectEvent(r, uploadSucceeded) }) - It("should clean PVCPrime when targetPVC bound", func() { + It("should clean PVCPrime when targetPVC bound and 
succeeded", func() { pvc := newUploadPopulatorPVC("test-pvc") pvc.Spec.VolumeName = "test-pv" + cc.AddAnnotation(pvc, cc.AnnPodPhase, string(corev1.PodSucceeded)) pvcPrime := newUploadPopulatorPVC(PVCPrimeName(pvc)) volumeUploadSourceCR := newUploadPopulatorCR("", false) @@ -209,8 +209,7 @@ var _ = Describe("Datavolume controller reconcile loop", func() { Expect(err).To(HaveOccurred()) Expect(errors.IsNotFound(err)).To(BeTrue()) - pvc.Annotations = make(map[string]string) - pvc.Annotations[cc.AnnSelectedNode] = "node01" + cc.AddAnnotation(pvc, cc.AnnSelectedNode, "node01") err = r.client.Update(context.TODO(), pvc) Expect(err).ToNot(HaveOccurred()) @@ -224,6 +223,52 @@ var _ = Describe("Datavolume controller reconcile loop", func() { Expect(pvcPrime).ToNot(BeNil()) Expect(pvcPrime.Annotations[cc.AnnSelectedNode]).To(Equal("node01")) }) + + DescribeTable("should update target pvc with desired annotations from pvc prime", func(podPhase string) { + pvc := newUploadPopulatorPVC("test-pvc") + cc.AddAnnotation(pvc, AnnPVCPrimeName, PVCPrimeName(pvc)) + uploadPV := uploadPV(pvc) + + volumeUploadSourceCR := newUploadPopulatorCR("", false) + scName := "test-sc" + sc := cc.CreateStorageClassWithProvisioner(scName, map[string]string{cc.AnnDefaultStorageClass: "true"}, map[string]string{}, "csi-plugin") + r := createUploadPopulatorReconciler(pvc, volumeUploadSourceCR, sc, uploadPV) + + _, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-pvc", Namespace: metav1.NamespaceDefault}}) + Expect(err).ToNot(HaveOccurred()) + pvcPrime, err := r.getPVCPrime(pvc) + Expect(err).ToNot(HaveOccurred()) + + pvcPrime.Spec.VolumeName = "test-pv" + pvcPrime.UID = pvcPrimeUID + for _, ann := range desiredAnnotations { + cc.AddAnnotation(pvcPrime, ann, "somevalue") + } + cc.AddAnnotation(pvcPrime, cc.AnnPodPhase, podPhase) + cc.AddAnnotation(pvcPrime, "undesiredAnn", "somevalue") + err = r.client.Update(context.TODO(), pvcPrime) + Expect(err).ToNot(HaveOccurred()) + + By("Reconcile") + result, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-pvc", Namespace: metav1.NamespaceDefault}}) + Expect(err).To(Not(HaveOccurred())) + Expect(result).ToNot(BeNil()) + + updatedPVC := &corev1.PersistentVolumeClaim{} + err = r.client.Get(context.TODO(), types.NamespacedName{Name: "test-pvc", Namespace: metav1.NamespaceDefault}, updatedPVC) + Expect(err).ToNot(HaveOccurred()) + Expect(updatedPVC.GetAnnotations()).ToNot(BeNil()) + for _, ann := range desiredAnnotations { + _, ok := updatedPVC.Annotations[ann] + Expect(ok).To(BeTrue()) + } + _, ok := updatedPVC.Annotations["undesiredAnn"] + Expect(ok).To(BeFalse()) + }, + Entry("with pod running phase", string(corev1.PodRunning)), + Entry("with pod succeded phase", string(corev1.PodFailed)), + Entry("with pod succeded phase", string(corev1.PodSucceeded)), + ) }) func newUploadPopulatorPVC(name string) *corev1.PersistentVolumeClaim { diff --git a/pkg/controller/populators/util.go b/pkg/controller/populators/util.go index 519e8e53c6..f0cddb3581 100644 --- a/pkg/controller/populators/util.go +++ b/pkg/controller/populators/util.go @@ -30,9 +30,6 @@ import ( ) const ( - // AnnPopulatorProgress is a standard annotation that can be used progress reporting - AnnPopulatorProgress = cc.AnnAPIGroup + "/storage.populator.progress" - primePvcPrefix = "prime" // errCreatingPVCPrime provides a const to indicate we failed to create PVC prime for population diff --git a/tests/api_validation_test.go 
b/tests/api_validation_test.go index dcb58a4e22..6434462d65 100644 --- a/tests/api_validation_test.go +++ b/tests/api_validation_test.go @@ -327,6 +327,7 @@ var _ = Describe("[rfe_id:1130][crit:medium][posneg:negative][vendor:cnv-qe@redh } Expect(err).ToNot(HaveOccurred()) progressRegExp := regexp.MustCompile(`\d{1,3}\.?\d{1,2}%`) + By("Waiting for datavolume to indicate progress") Eventually(func() bool { dv, err := f.CdiClient.CdiV1beta1().DataVolumes(f.Namespace.Name).Get(context.TODO(), dataVolumeName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/csiclone_test.go b/tests/csiclone_test.go index 09d89d6722..27963a080b 100644 --- a/tests/csiclone_test.go +++ b/tests/csiclone_test.go @@ -106,15 +106,16 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component][crit:high][rfe_id: Skip("CSI Volume Clone is not applicable") } - By("Configure namespace quota") - Expect(f.CreateStorageQuota(int64(2), int64(1024*1024*1024))).To(Succeed()) - By(fmt.Sprintf("configure storage profile %s", f.CsiCloneSCName)) Expect( utils.ConfigureCloneStrategy(f.CrClient, f.CdiClient, f.CsiCloneSCName, originalProfileSpec, cdiv1.CloneStrategyCsiClone), ).To(Succeed()) - dataVolume, md5 := createDataVolumeDontWait("dv-csi-clone-test-1", utils.DefaultImagePath, v1.PersistentVolumeFilesystem, f.CsiCloneSCName, f) + sourcePvc, md5 := createAndVerifySourcePVC("dv-csi-clone-test-1", utils.DefaultImagePath, f.CsiCloneSCName, v1.PersistentVolumeFilesystem, f) + By("Configure namespace quota after source is ready") + Expect(f.CreateStorageQuota(int64(2), int64(1024*1024*1024))).To(Succeed()) + + dataVolume := createCloneDataVolumeFromSource(sourcePvc, "dv-csi-clone-test-1", f.CsiCloneSCName, f) By("Verify Quota was exceeded in events and dv conditions") waitForDvPhase(cdiv1.Pending, dataVolume, f) f.ExpectEvent(dataVolume.Namespace).Should(ContainSubstring(cc.ErrExceededQuota)) @@ -150,7 +151,7 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component][crit:high][rfe_id: }) -func createDataVolumeDontWait(dataVolumeName, testPath string, volumeMode v1.PersistentVolumeMode, scName string, f *framework.Framework) (*cdiv1.DataVolume, string) { +func createAndVerifySourcePVC(dataVolumeName, testPath, scName string, volumeMode v1.PersistentVolumeMode, f *framework.Framework) (*v1.PersistentVolumeClaim, string) { sourcePvc := createAndPopulateSourcePVC(dataVolumeName, volumeMode, scName, f) md5, err := f.GetMD5(f.Namespace, sourcePvc, testPath, utils.UploadFileSize) Expect(err).ToNot(HaveOccurred()) @@ -158,15 +159,26 @@ func createDataVolumeDontWait(dataVolumeName, testPath string, volumeMode v1.Per err = utils.DeletePodByName(f.K8sClient, utils.VerifierPodName, f.Namespace.Name, &zero) Expect(err).ToNot(HaveOccurred()) + return sourcePvc, md5 +} + +func createCloneDataVolumeFromSource(sourcePvc *v1.PersistentVolumeClaim, dataVolumeName, scName string, f *framework.Framework) *cdiv1.DataVolume { By(fmt.Sprintf("creating a new target PVC (datavolume) to clone %s", sourcePvc.Name)) dataVolume := utils.NewCloningDataVolume(dataVolumeName, "1Gi", sourcePvc) if scName != "" { dataVolume.Spec.PVC.StorageClassName = &scName } By(fmt.Sprintf("creating new datavolume %s", dataVolume.Name)) - dataVolume, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dataVolume) + dataVolume, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dataVolume) Expect(err).ToNot(HaveOccurred()) + return dataVolume +} + +func createDataVolumeDontWait(dataVolumeName, 
testPath string, volumeMode v1.PersistentVolumeMode, scName string, f *framework.Framework) (*cdiv1.DataVolume, string) { + sourcePvc, md5 := createAndVerifySourcePVC(dataVolumeName, testPath, scName, volumeMode, f) + dataVolume := createCloneDataVolumeFromSource(sourcePvc, dataVolumeName, scName, f) + return dataVolume, md5 } diff --git a/tests/datavolume_test.go b/tests/datavolume_test.go index 2de5de846d..b794b4427e 100644 --- a/tests/datavolume_test.go +++ b/tests/datavolume_test.go @@ -27,6 +27,7 @@ import ( "kubevirt.io/containerized-data-importer/pkg/common" controller "kubevirt.io/containerized-data-importer/pkg/controller/common" dvc "kubevirt.io/containerized-data-importer/pkg/controller/datavolume" + "kubevirt.io/containerized-data-importer/pkg/controller/populators" featuregates "kubevirt.io/containerized-data-importer/pkg/feature-gates" "kubevirt.io/containerized-data-importer/pkg/util/naming" "kubevirt.io/containerized-data-importer/tests/framework" @@ -232,19 +233,20 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Describe("Verify DataVolume", func() { type dataVolumeTestArguments struct { - name string - size string - url func() string - dvFunc func(string, string, string) *cdiv1.DataVolume - errorMessage string - errorMessageFunc func() string - eventReason string - phase cdiv1.DataVolumePhase - repeat int - checkPermissions bool - readyCondition *cdiv1.DataVolumeCondition - boundCondition *cdiv1.DataVolumeCondition - runningCondition *cdiv1.DataVolumeCondition + name string + size string + url func() string + dvFunc func(string, string, string) *cdiv1.DataVolume + errorMessage string + errorMessageFunc func() string + eventReason string + phase cdiv1.DataVolumePhase + repeat int + checkPermissions bool + readyCondition *cdiv1.DataVolumeCondition + boundCondition *cdiv1.DataVolumeCondition + boundConditionWithPopulators *cdiv1.DataVolumeCondition + runningCondition *cdiv1.DataVolumeCondition } createHTTPSDataVolume := func(dataVolumeName, size, url string) *cdiv1.DataVolume { @@ -324,14 +326,20 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", waitForDvPhase(args.phase, dataVolume, f) - By("Verifying the DV has the correct conditions and messages for those conditions") - utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, pollingInterval, args.readyCondition, args.runningCondition, args.boundCondition) - // verify PVC was created By("verifying pvc was created") pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Get(context.TODO(), dataVolume.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By("Verifying the DV has the correct conditions and messages for those conditions") + usePopulator, err := dvc.CheckPVCUsingPopulators(pvc) + Expect(err).ToNot(HaveOccurred()) + if usePopulator && args.boundConditionWithPopulators != nil { + utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, pollingInterval, args.readyCondition, args.runningCondition, args.boundConditionWithPopulators) + } else { + utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, pollingInterval, args.readyCondition, args.runningCondition, args.boundCondition) + } + By("Verifying event occurred") Eventually(func() bool { // Only find DV events, we know the PVC gets the same events @@ -404,20 +412,26 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, 
pollingInterval, boundCondition, readyCondition) By("Increase quota") - err = f.UpdateStorageQuota(int64(2), int64(2*1024*1024*1024)) + err = f.UpdateStorageQuota(int64(4), int64(4*1024*1024*1024)) Expect(err).ToNot(HaveOccurred()) f.ForceBindPvcIfDvIsWaitForFirstConsumer(dataVolume) waitForDvPhase(args.phase, dataVolume, f) - By("Verifying the DV has the correct conditions and messages for those conditions") - utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, pollingInterval, args.readyCondition, args.runningCondition, args.boundCondition) - // verify PVC was created By("verifying pvc was created") - _, err = f.K8sClient.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Get(context.TODO(), dataVolume.Name, metav1.GetOptions{}) + pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Get(context.TODO(), dataVolume.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By("Verifying the DV has the correct conditions and messages for those conditions") + usePopulator, err := dvc.CheckPVCUsingPopulators(pvc) + Expect(err).ToNot(HaveOccurred()) + if usePopulator && args.boundConditionWithPopulators != nil { + utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, pollingInterval, args.readyCondition, args.runningCondition, args.boundConditionWithPopulators) + } else { + utils.WaitForConditions(f, dataVolume.Name, f.Namespace.Name, timeout, pollingInterval, args.readyCondition, args.runningCondition, args.boundCondition) + } + By("Verifying event occurred") Eventually(func() bool { // Only find DV events, we know the PVC gets the same events @@ -485,6 +499,12 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Message: "PVC dv-http-import-invalid-url Bound", Reason: "Bound", }, + boundConditionWithPopulators: &cdiv1.DataVolumeCondition{ + Type: cdiv1.DataVolumeBound, + Status: v1.ConditionFalse, + Message: "PVC dv-http-import-invalid-url Pending", + Reason: "Pending", + }, runningCondition: &cdiv1.DataVolumeCondition{ Type: cdiv1.DataVolumeRunning, Status: v1.ConditionFalse, @@ -509,6 +529,12 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Message: "PVC dv-http-import-404 Bound", Reason: "Bound", }, + boundConditionWithPopulators: &cdiv1.DataVolumeCondition{ + Type: cdiv1.DataVolumeBound, + Status: v1.ConditionFalse, + Message: "PVC dv-http-import-404 Pending", + Reason: "Pending", + }, runningCondition: &cdiv1.DataVolumeCondition{ Type: cdiv1.DataVolumeRunning, Status: v1.ConditionFalse, @@ -533,6 +559,12 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Message: "PVC dv-invalid-qcow-large-memory Bound", Reason: "Bound", }, + boundConditionWithPopulators: &cdiv1.DataVolumeCondition{ + Type: cdiv1.DataVolumeBound, + Status: v1.ConditionFalse, + Message: "PVC dv-invalid-qcow-large-memory Pending", + Reason: "Pending", + }, runningCondition: &cdiv1.DataVolumeCondition{ Type: cdiv1.DataVolumeRunning, Status: v1.ConditionFalse, @@ -749,6 +781,12 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Message: "PVC upload-dv Bound", Reason: "Bound", }, + boundConditionWithPopulators: &cdiv1.DataVolumeCondition{ + Type: cdiv1.DataVolumeBound, + Status: v1.ConditionFalse, + Message: "PVC upload-dv Pending", + Reason: "Pending", + }, runningCondition: &cdiv1.DataVolumeCondition{ Type: cdiv1.DataVolumeRunning, Status: v1.ConditionTrue, @@ -795,6 +833,12 @@ var _ = 
Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Message: "PVC dv-non-tar-archive Bound", Reason: "Bound", }, + boundConditionWithPopulators: &cdiv1.DataVolumeCondition{ + Type: cdiv1.DataVolumeBound, + Status: v1.ConditionFalse, + Message: "PVC dv-non-tar-archive Pending", + Reason: "Pending", + }, runningCondition: &cdiv1.DataVolumeCondition{ Type: cdiv1.DataVolumeRunning, Status: v1.ConditionFalse, @@ -1168,6 +1212,12 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", Message: "PVC upload-dv Bound", Reason: "Bound", }, + boundConditionWithPopulators: &cdiv1.DataVolumeCondition{ + Type: cdiv1.DataVolumeBound, + Status: v1.ConditionFalse, + Message: "PVC upload-dv Pending", + Reason: "Pending", + }, runningCondition: &cdiv1.DataVolumeCondition{ Type: cdiv1.DataVolumeRunning, Status: v1.ConditionTrue, @@ -1502,7 +1552,15 @@ var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]DataVolume tests", By("verifying pvc and pod were created") pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Get(context.TODO(), dataVolume.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + usePopulator, err := dvc.CheckPVCUsingPopulators(pvc) + Expect(err).ToNot(HaveOccurred()) podName := pvc.Annotations[controller.AnnImportPod] + if usePopulator { + pvcPrimeName := populators.PVCPrimeName(pvc) + pvcPrime, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dataVolume.Namespace).Get(context.TODO(), pvcPrimeName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + podName = pvcPrime.Annotations[controller.AnnImportPod] + } pod, err := f.K8sClient.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) diff --git a/tests/external_population_test.go b/tests/external_population_test.go index 4b91c5291e..5719c20429 100644 --- a/tests/external_population_test.go +++ b/tests/external_population_test.go @@ -226,11 +226,14 @@ var _ = Describe("Population tests", func() { By("Verifying pvc was created") pvc, err := utils.WaitForPVC(f.K8sClient, dataVolume.Namespace, dataVolume.Name) Expect(err).ToNot(HaveOccurred()) - f.ForceBindIfWaitForFirstConsumer(pvc) + executorPod := f.CreateConsumerPod(pvc) // We check the expected event + By("Wait for expected no cdi driver event") f.ExpectEvent(dataVolume.Namespace).Should(ContainSubstring(dvc.NoCSIDriverForExternalPopulation)) By("Cleaning up") + err = utils.DeletePodNoGrace(f.K8sClient, executorPod, dataVolume.Namespace) + Expect(err).ToNot(HaveOccurred()) err = utils.DeleteDataVolume(f.CdiClient, f.Namespace.Name, dataVolume.Name) Expect(err).ToNot(HaveOccurred()) Eventually(func() bool { diff --git a/tests/framework/pvc.go b/tests/framework/pvc.go index e5c3506407..4f477a9dbf 100644 --- a/tests/framework/pvc.go +++ b/tests/framework/pvc.go @@ -77,9 +77,18 @@ func (f *Framework) ForceBindPvcIfDvIsWaitForFirstConsumer(dv *cdiv1.DataVolume) pvc, err := utils.WaitForPVC(f.K8sClient, dv.Namespace, dv.Name) gomega.Expect(err).ToNot(gomega.HaveOccurred(), "PVC should exist") if f.IsBindingModeWaitForFirstConsumer(pvc.Spec.StorageClassName) { - err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.WaitForFirstConsumer, dv.Name) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - createConsumerPod(pvc, f) + // check if pvc is a population pvc but not from pvc or snapshot + if pvc.Spec.DataSourceRef != nil && + (dv.Spec.Source == nil || dv.Spec.Source.PVC == nil) && + (dv.Spec.Source == nil || dv.Spec.Source.Snapshot 
== nil) { + err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.PendingPopulation, dv.Name) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + createConsumerPodForPopulationPVC(pvc, f) + } else { + err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.WaitForFirstConsumer, dv.Name) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + createConsumerPod(pvc, f) + } } } @@ -91,7 +100,11 @@ func (f *Framework) WaitPVCDeletedByUID(pvcSpec *k8sv1.PersistentVolumeClaim, ti // ForceBindIfWaitForFirstConsumer creates a Pod with the passed in PVC mounted under /dev/pvc, which forces the PVC to be scheduled and bound. func (f *Framework) ForceBindIfWaitForFirstConsumer(targetPvc *k8sv1.PersistentVolumeClaim) { if targetPvc.Spec.VolumeName == "" && f.IsBindingModeWaitForFirstConsumer(targetPvc.Spec.StorageClassName) { - createConsumerPod(targetPvc, f) + if targetPvc.Spec.DataSourceRef != nil { + createConsumerPodForPopulationPVC(targetPvc, f) + } else { + createConsumerPod(targetPvc, f) + } } } @@ -102,8 +115,8 @@ func (f *Framework) ForceSchedulingIfWaitForFirstConsumerPopulationPVC(targetPvc } } -func createConsumerPod(targetPvc *k8sv1.PersistentVolumeClaim, f *Framework) { - fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: creating \"consumer-pod\" to force binding PVC: %s\n", targetPvc.Name) +// CreateConsumerPod create a pod that consumes the given PVC +func (f *Framework) CreateConsumerPod(targetPvc *k8sv1.PersistentVolumeClaim) *k8sv1.Pod { namespace := targetPvc.Namespace err := utils.WaitForPersistentVolumeClaimPhase(f.K8sClient, targetPvc.Namespace, k8sv1.ClaimPending, targetPvc.Name) @@ -112,7 +125,15 @@ func createConsumerPod(targetPvc *k8sv1.PersistentVolumeClaim, f *Framework) { podName := naming.GetResourceName("consumer-pod", targetPvc.Name) executorPod, err := f.CreateNoopPodWithPVC(podName, namespace, targetPvc) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - err = utils.WaitTimeoutForPodSucceeded(f.K8sClient, executorPod.Name, namespace, utils.PodWaitForTime) + return executorPod +} + +func createConsumerPod(targetPvc *k8sv1.PersistentVolumeClaim, f *Framework) { + fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: creating \"consumer-pod\" to force binding PVC: %s\n", targetPvc.Name) + executorPod := f.CreateConsumerPod(targetPvc) + + namespace := targetPvc.Namespace + err := utils.WaitTimeoutForPodSucceeded(f.K8sClient, executorPod.Name, namespace, utils.PodWaitForTime) gomega.Expect(err).ToNot(gomega.HaveOccurred()) err = utils.WaitForPersistentVolumeClaimPhase(f.K8sClient, namespace, k8sv1.ClaimBound, targetPvc.Name) @@ -123,15 +144,9 @@ func createConsumerPod(targetPvc *k8sv1.PersistentVolumeClaim, f *Framework) { func createConsumerPodForPopulationPVC(targetPvc *k8sv1.PersistentVolumeClaim, f *Framework) { fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: creating \"consumer-pod\" to get 'selected-node' annotation on PVC: %s\n", targetPvc.Name) - namespace := targetPvc.Namespace - - err := utils.WaitForPersistentVolumeClaimPhase(f.K8sClient, targetPvc.Namespace, k8sv1.ClaimPending, targetPvc.Name) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - podName := naming.GetResourceName("consumer-pod", targetPvc.Name) - executorPod, err := f.CreateNoopPodWithPVC(podName, namespace, targetPvc) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + executorPod := f.CreateConsumerPod(targetPvc) + namespace := targetPvc.Namespace selectedNode, status, err := utils.WaitForPVCAnnotation(f.K8sClient, namespace, targetPvc, controller.AnnSelectedNode) gomega.Expect(err).ToNot(gomega.HaveOccurred()) 
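// Editor's sketch, not part of the diff: a rough illustration of how a test could drive the
// WaitForFirstConsumer flow for a populator-backed PVC using the framework/utils helpers this
// PR touches (f.CreateConsumerPod, utils.WaitForPVC, utils.WaitForPVCAnnotation,
// utils.DeletePodNoGrace). The DataVolume name "example-dv" and the surrounding Ginkgo test
// context are assumptions for illustration only.
pvc, err := utils.WaitForPVC(f.K8sClient, f.Namespace.Name, "example-dv")
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// Run a no-op consumer pod so the scheduler assigns a node to the claim; with populators the
// population can start once the selected-node annotation appears, before the claim is Bound.
executorPod := f.CreateConsumerPod(pvc)

selectedNode, found, err := utils.WaitForPVCAnnotation(f.K8sClient, pvc.Namespace, pvc, controller.AnnSelectedNode)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
gomega.Expect(found).To(gomega.BeTrue())
gomega.Expect(selectedNode).ToNot(gomega.BeEmpty())

// Remove the helper pod once scheduling has happened; population proceeds on its own.
gomega.Expect(utils.DeletePodNoGrace(f.K8sClient, executorPod, pvc.Namespace)).To(gomega.Succeed())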
gomega.Expect(status).To(gomega.BeTrue()) @@ -299,6 +314,7 @@ func (f *Framework) VerifyFSOverhead(namespace *k8sv1.Namespace, pvc *k8sv1.Pers } requestedSize := pvc.Spec.Resources.Requests[k8sv1.ResourceStorage] + fmt.Fprintf(ginkgo.GinkgoWriter, "INFO: VerifyFSOverhead comparison: Virtual: %d, Actual: %d, requestedSize: %d\n", info.VirtualSize, info.ActualSize, requestedSize.Value()) return info.VirtualSize <= info.ActualSize && info.VirtualSize < requestedSize.Value(), nil } diff --git a/tests/import_test.go b/tests/import_test.go index 1e44c49f5e..0593f0e5eb 100644 --- a/tests/import_test.go +++ b/tests/import_test.go @@ -1244,9 +1244,17 @@ var _ = Describe("Preallocation", func() { if dv.Spec.Source.Registry != nil && dv.Spec.Source.Registry.ImageStream != nil { By("Verify image lookup annotation") podName := pvc.Annotations[controller.AnnImportPod] - pod, err := f.K8sClient.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(pod.Annotations[controller.AnnOpenShiftImageLookup]).To(Equal("*")) + if pvc.Spec.DataSourceRef != nil { + Expect(podName).To(BeEmpty()) + } else { + // when using populators, once the population completes, PVC' and + // the importer pod are deleted, so we can't check the annotation + // TODO: any suggestions? putting the check before dv completes is + // still racy + pod, err := f.K8sClient.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), podName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(pod.Annotations[controller.AnnOpenShiftImageLookup]).To(Equal("*")) + } } }, Entry("HTTP import (ISO image)", true, utils.TinyCoreMD5, utils.DefaultImagePath, func() *cdiv1.DataVolume { @@ -1376,7 +1384,13 @@ var _ = Describe("Preallocation", func() { Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) - Expect(f.VerifyFSOverhead(f.Namespace, pvc, preallocation)).To(BeTrue()) + // in case of using populators the requested size with the fs overhead + // is put only on the PVC', which at this point we can't check + // TODO: any suggestions? 
getting the requested size from PVC' at an earlier + // point in the test seems to be racy + if pvc.Spec.DataSourceRef == nil { + Expect(f.VerifyFSOverhead(f.Namespace, pvc, preallocation)).To(BeTrue()) + } pvc, err = utils.FindPVC(f.K8sClient, dataVolume.Namespace, dataVolume.Name) Expect(err).ToNot(HaveOccurred()) @@ -1619,7 +1633,7 @@ var _ = Describe("Import populator", func() { Expect(f.VerifyPermissions(f.Namespace, pvc)).To(BeTrue(), "Permissions on disk image are not 660") By("Verify 100.0% annotation") - progress, ok, err := utils.WaitForPVCAnnotation(f.K8sClient, f.Namespace.Name, pvc, controller.AnnImportProgressReporting) + progress, ok, err := utils.WaitForPVCAnnotation(f.K8sClient, f.Namespace.Name, pvc, controller.AnnPopulatorProgress) Expect(err).ToNot(HaveOccurred()) Expect(ok).To(BeTrue()) Expect(progress).Should(BeEquivalentTo("100.0%")) @@ -1671,7 +1685,7 @@ var _ = Describe("Import populator", func() { Expect(f.VerifySparse(f.Namespace, pvc, utils.DefaultPvcMountPath)).To(BeTrue()) By("Verify 100.0% annotation") - progress, ok, err := utils.WaitForPVCAnnotation(f.K8sClient, f.Namespace.Name, pvc, controller.AnnImportProgressReporting) + progress, ok, err := utils.WaitForPVCAnnotation(f.K8sClient, f.Namespace.Name, pvc, controller.AnnPopulatorProgress) Expect(err).ToNot(HaveOccurred()) Expect(ok).To(BeTrue()) Expect(progress).Should(BeEquivalentTo("100.0%")) @@ -1710,7 +1724,7 @@ var _ = Describe("Import populator", func() { Expect(same).To(BeTrue()) By("Verify 100.0% annotation") - progress, ok, err := utils.WaitForPVCAnnotation(f.K8sClient, f.Namespace.Name, pvc, controller.AnnImportProgressReporting) + progress, ok, err := utils.WaitForPVCAnnotation(f.K8sClient, f.Namespace.Name, pvc, controller.AnnPopulatorProgress) Expect(err).ToNot(HaveOccurred()) Expect(ok).To(BeTrue()) Expect(progress).Should(BeEquivalentTo("100.0%")) diff --git a/tests/operator_test.go b/tests/operator_test.go index f47eceaa06..ad1f646b25 100644 --- a/tests/operator_test.go +++ b/tests/operator_test.go @@ -426,9 +426,13 @@ var _ = Describe("ALL Operator tests", func() { Expect(err).ToNot(HaveOccurred()) f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + uploadPodName := utils.UploadPodName(pvc) + By("Waiting for pod to be running") Eventually(func() bool { - pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), "cdi-upload-"+dv.Name, metav1.GetOptions{}) + pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), uploadPodName, metav1.GetOptions{}) if errors.IsNotFound(err) { return false } diff --git a/tests/utils/BUILD.bazel b/tests/utils/BUILD.bazel index f07d9fb3a3..408f2db55b 100644 --- a/tests/utils/BUILD.bazel +++ b/tests/utils/BUILD.bazel @@ -26,6 +26,7 @@ go_library( "//pkg/common:go_default_library", "//pkg/controller/common:go_default_library", "//pkg/controller/datavolume:go_default_library", + "//pkg/controller/populators:go_default_library", "//pkg/image:go_default_library", "//pkg/util:go_default_library", "//pkg/util/naming:go_default_library", diff --git a/tests/utils/upload.go b/tests/utils/upload.go index 3f9254a309..b829aba0ea 100644 --- a/tests/utils/upload.go +++ b/tests/utils/upload.go @@ -3,7 +3,6 @@ package utils import ( "context" - corev1 "k8s.io/api/core/v1" k8sv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ 
-14,6 +13,7 @@ import ( cdiClientset "kubevirt.io/containerized-data-importer/pkg/client/clientset/versioned" "kubevirt.io/containerized-data-importer/pkg/common" cc "kubevirt.io/containerized-data-importer/pkg/controller/common" + "kubevirt.io/containerized-data-importer/pkg/controller/populators" "kubevirt.io/containerized-data-importer/pkg/util/naming" ) @@ -56,7 +56,11 @@ const ( // UploadPodName returns the name of the upload server pod associated with a PVC func UploadPodName(pvc *k8sv1.PersistentVolumeClaim) string { - return naming.GetResourceName(common.UploadPodName, pvc.Name) + uploadPodNameSuffix := pvc.Name + if pvc.Spec.DataSourceRef != nil { + uploadPodNameSuffix = populators.PVCPrimeName(pvc) + } + return naming.GetResourceName(common.UploadPodName, uploadPodNameSuffix) } // UploadPVCDefinition creates a PVC with the upload target annotation @@ -91,7 +95,7 @@ func UploadPopulationPVCDefinition() *k8sv1.PersistentVolumeClaim { func UploadPopulationBlockPVCDefinition(storageClassName string) *k8sv1.PersistentVolumeClaim { pvcDef := UploadPopulationPVCDefinition() pvcDef.Spec.StorageClassName = &storageClassName - volumeMode := corev1.PersistentVolumeBlock + volumeMode := k8sv1.PersistentVolumeBlock pvcDef.Spec.VolumeMode = &volumeMode return pvcDef }