Commit
fix for smart clones, rearranging with for-loops
Signed-off-by: Ido Aharon <iaharon@redhat.com>
ido106 committed Mar 28, 2023
1 parent fa44e08 commit 8d0650e
Showing 2 changed files with 47 additions and 57 deletions.
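In short, this commit makes two changes. In pkg/controller/clone-controller.go, the MakeCloneSourcePodSpec parameters are reordered so that ownerRefAnno sits with the other string arguments ahead of the imagePullSecrets slice, and the single call in CreateCloneSourcePod is updated to match. In tests/cloner_test.go, the three copy-pasted target DataVolume blocks in the parallel-clone tests are collapsed into loops driven by a NumOfClones constant over a shared targetDvs slice, which AfterEach also uses for cleanup. A minimal standalone sketch of that loop pattern follows; it runs outside the Ginkgo framework, and makeTargetDv is a hypothetical stand-in for utils.NewDataVolumeForImageCloning plus the create/bind calls, not a CDI helper.

package main

import "fmt"

const numOfClones = 3

// targetDv stands in for *cdiv1.DataVolume in this sketch.
type targetDv struct{ name string }

// makeTargetDv is a hypothetical stand-in for creating one clone target
// (NewDataVolumeForImageCloning + CreateDataVolumeFromDefinition in the real test).
func makeTargetDv(i int) *targetDv {
	return &targetDv{name: fmt.Sprintf("target-dv%d", i)}
}

func main() {
	var targetDvs []*targetDv

	// Create all targets first so the clone transfers can run in parallel.
	for i := 1; i <= numOfClones; i++ {
		targetDvs = append(targetDvs, makeTargetDv(i))
	}

	// Then walk the same slice to wait on and verify each clone; the real
	// AfterEach appends sourceDv, cleans everything up, and resets targetDvs
	// to nil so state does not leak between specs.
	for _, dv := range targetDvs {
		fmt.Println("verifying", dv.name)
	}
}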
4 changes: 2 additions & 2 deletions pkg/controller/clone-controller.go
@@ -521,7 +521,7 @@ func (r *CloneReconciler) CreateCloneSourcePod(image, pullPolicy string, pvc *co
sourceVolumeMode = corev1.PersistentVolumeFilesystem
}

pod := MakeCloneSourcePodSpec(sourceVolumeMode, image, pullPolicy, imagePullSecrets, ownerKey, serverCABundle, pvc, sourcePvc, podResourceRequirements, workloadNodePlacement)
pod := MakeCloneSourcePodSpec(sourceVolumeMode, image, pullPolicy, ownerKey, imagePullSecrets, serverCABundle, pvc, sourcePvc, podResourceRequirements, workloadNodePlacement)
util.SetRecommendedLabels(pod, r.installerLabels, "cdi-controller")

if err := r.client.Create(context.TODO(), pod); err != nil {
@@ -534,7 +534,7 @@ func (r *CloneReconciler) CreateCloneSourcePod(image, pullPolicy string, pvc *co
}

// MakeCloneSourcePodSpec creates and returns the clone source pod spec based on the target pvc.
func MakeCloneSourcePodSpec(sourceVolumeMode corev1.PersistentVolumeMode, image, pullPolicy string, imagePullSecrets []corev1.LocalObjectReference, ownerRefAnno string,
func MakeCloneSourcePodSpec(sourceVolumeMode corev1.PersistentVolumeMode, image, pullPolicy, ownerRefAnno string, imagePullSecrets []corev1.LocalObjectReference,
serverCACert []byte, targetPvc, sourcePvc *corev1.PersistentVolumeClaim, resourceRequirements *corev1.ResourceRequirements,
workloadNodePlacement *sdkapi.NodePlacement) *corev1.Pod {

100 changes: 45 additions & 55 deletions tests/cloner_test.go
@@ -1785,21 +1785,25 @@ var _ = Describe("all clone tests", func() {
tinyCoreIsoURL := func() string { return fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs) }

var (
sourceDv, targetDv1, targetDv2, targetDv3 *cdiv1.DataVolume
err error
sourceDv *cdiv1.DataVolume
targetDvs []*cdiv1.DataVolume
err error
)

AfterEach(func() {
dvs := []*cdiv1.DataVolume{sourceDv, targetDv1, targetDv2, targetDv3}
for _, dv := range dvs {
targetDvs = append(targetDvs, sourceDv)
for _, dv := range targetDvs {
cleanDv(f, dv)
if dv != nil && dv.Status.Phase == cdiv1.Succeeded {
validateCloneType(f, dv)
}
}
targetDvs = nil
})

It("[rfe_id:1277][test_id:1899][crit:High][vendor:cnv-qe@redhat.com][level:component] Should allow multiple cloning operations in parallel", func() {
const NumOfClones int = 3

By("Creating a source from a real image")
sourceDv = utils.NewDataVolumeWithHTTPImport("source-dv", "200Mi", tinyCoreIsoURL())
sourceDv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, sourceDv)
@@ -1819,61 +1823,57 @@ var _ = Describe("all clone tests", func() {
_, err = utils.WaitPodDeleted(f.K8sClient, "execute-command", f.Namespace.Name, verifyPodDeletedTimeout)
Expect(err).ToNot(HaveOccurred())

// By not waiting for completion, we will start 3 transfers in parallell
By("Cloning from the source DataVolume to target1")
targetDv1 = utils.NewDataVolumeForImageCloning("target-dv1", "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv1.Annotations[controller.AnnPodRetainAfterCompletion] = "true"
targetDv1, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv1)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv1)

By("Cloning from the source DataVolume to target2 in parallel")
targetDv2 = utils.NewDataVolumeForImageCloning("target-dv2", "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv2.Annotations[controller.AnnPodRetainAfterCompletion] = "true"
targetDv2, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv2)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv2)

By("Cloning from the source DataVolume to target3 in parallel")
targetDv3 = utils.NewDataVolumeForImageCloning("target-dv3", "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv3.Annotations[controller.AnnPodRetainAfterCompletion] = "true"
targetDv3, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv3)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv3)
// By not waiting for completion, we will start 3 transfers in parallel
By("Cloning #NumOfClones times in parallel")
for i := 1; i <= NumOfClones; i++ {
By("Cloning from the source DataVolume to target" + strconv.Itoa(i))
targetDv := utils.NewDataVolumeForImageCloning("target-dv"+strconv.Itoa(i), "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv.Annotations[controller.AnnPodRetainAfterCompletion] = "true"
targetDv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv)
targetDvs = append(targetDvs, targetDv)
}

dvs := []*cdiv1.DataVolume{targetDv1, targetDv2, targetDv3}
podsNodeName := make(map[string]string)
for _, dv := range dvs {
for _, dv := range targetDvs {
By("Waiting for clone to be completed")
err = utils.WaitForDataVolumePhaseWithTimeout(f, f.Namespace.Name, cdiv1.Succeeded, dv.Name, 3*90*time.Second)
Expect(err).ToNot(HaveOccurred())
}

for _, dv := range dvs {
for _, dv := range targetDvs {
By("Verifying MD5 sum matches")
matchFile := filepath.Join(testBaseDir, "disk.img")
Expect(f.VerifyTargetPVCContentMD5(f.Namespace, utils.PersistentVolumeClaimFromDataVolume(dv), matchFile, md5sum[:32])).To(BeTrue())

pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("Getting pod %s/%s", dv.Namespace, pvc.Annotations[ctrl.AnnCloneSourcePod]))
pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), pvc.Annotations[ctrl.AnnCloneSourcePod], metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
podsNodeName[dv.Name] = pod.Spec.NodeName
if cloneSourcePod := pvc.Annotations[ctrl.AnnCloneSourcePod]; cloneSourcePod != "" {
By(fmt.Sprintf("Getting pod %s/%s", dv.Namespace, cloneSourcePod))
pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), cloneSourcePod, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
podsNodeName[dv.Name] = pod.Spec.NodeName
}

By("Deleting verifier pod")
err = f.K8sClient.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), utils.VerifierPodName, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
_, err = utils.WaitPodDeleted(f.K8sClient, utils.VerifierPodName, f.Namespace.Name, verifyPodDeletedTimeout)
Expect(err).ToNot(HaveOccurred())
}

// All pods should be in the same node
Expect(podsNodeName[targetDv1.Name]).To(Equal(podsNodeName[targetDv2.Name]))
Expect(podsNodeName[targetDv2.Name]).To(Equal(podsNodeName[targetDv3.Name]))
if len(podsNodeName) > 0 {
Expect(podsNodeName[targetDvs[0].Name]).To(Equal(podsNodeName[targetDvs[1].Name]))
Expect(podsNodeName[targetDvs[1].Name]).To(Equal(podsNodeName[targetDvs[2].Name]))
}
})

It("[rfe_id:1277][test_id:1899][crit:High][vendor:cnv-qe@redhat.com][level:component] Should allow multiple cloning operations in parallel for block devices", func() {
const NumOfClones int = 3

if !f.IsBlockVolumeStorageClassAvailable() {
Skip("Storage Class for block volume is not available")
}
@@ -1892,39 +1892,29 @@ var _ = Describe("all clone tests", func() {
fmt.Fprintf(GinkgoWriter, "INFO: MD5SUM for source is: %s\n", md5sum[:32])

// By not waiting for completion, we will start 3 transfers in parallell
By("Cloning from the source DataVolume to target1")
targetDv1 = utils.NewDataVolumeForImageCloning("target-dv1", "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv1, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv1)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv1)

By("Cloning from the source DataVolume to target2 in parallel")
targetDv2 = utils.NewDataVolumeForImageCloning("target-dv2", "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv2, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv2)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv2)

By("Cloning from the source DataVolume to target3 in parallel")
targetDv3 = utils.NewDataVolumeForImageCloning("target-dv3", "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv3, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv3)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv3)
By("Cloning #NumOfClones times in parallel")
for i := 1; i <= NumOfClones; i++ {
By("Cloning from the source DataVolume to target" + strconv.Itoa(i))
targetDv := utils.NewDataVolumeForImageCloning("target-dv"+strconv.Itoa(i), "200Mi", f.Namespace.Name, sourceDv.Name, sourceDv.Spec.PVC.StorageClassName, sourceDv.Spec.PVC.VolumeMode)
targetDv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, targetDv)
Expect(err).ToNot(HaveOccurred())
f.ForceBindPvcIfDvIsWaitForFirstConsumer(targetDv)
targetDvs = append(targetDvs, targetDv)
}

dvs := []*cdiv1.DataVolume{targetDv1, targetDv2, targetDv3}
for _, dv := range dvs {
for _, dv := range targetDvs {
By("Waiting for clone to be completed")
err = utils.WaitForDataVolumePhaseWithTimeout(f, f.Namespace.Name, cdiv1.Succeeded, dv.Name, 3*90*time.Second)
Expect(err).ToNot(HaveOccurred())
}

for _, dv := range dvs {
for _, dv := range targetDvs {
By("Verifying MD5 sum matches")
Expect(f.VerifyTargetPVCContentMD5(f.Namespace, utils.PersistentVolumeClaimFromDataVolume(dv), testBaseDir, md5sum[:32])).To(BeTrue())
By("Deleting verifier pod")
err = utils.DeleteVerifierPod(f.K8sClient, f.Namespace.Name)
Expect(err).ToNot(HaveOccurred())
}

})
})

