Merge pull request eksctl-io#226 from mesosphere/dkoshkin/block-volume-e2e

New block volume e2e tests
k8s-ci-robot authored Feb 27, 2019
2 parents 5b3ddbe + af3330f commit c716388
Showing 5 changed files with 126 additions and 18 deletions.
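Summary of the change: the e2e cluster config gains the CSIBlockVolume feature gate, DynamicallyProvisionedWriterReaderVolumeTest is renamed to the more general DynamicallyProvisionedCmdVolumeTest, and two new tests dynamically provision raw block volumes — one pod with a single block device, and one pod binding a raw block volume and a filesystem volume at the same time. Supporting plumbing threads a VolumeMode (FileSystem or Block) through PVC creation and adds SetupRawBlockVolume for attaching a claim to a pod as a volumeDevice.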
2 changes: 2 additions & 0 deletions hack/feature-gates.yaml
@@ -2,7 +2,9 @@
 featureGates:
   CSIDriverRegistry: "true"
   CSINodeInfo: "true"
+  CSIBlockVolume: "true"
 kubelet:
   featureGates:
     CSIDriverRegistry: "true"
     CSINodeInfo: "true"
+    CSIBlockVolume: "true"
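CSIBlockVolume was still an alpha gate on the Kubernetes releases this suite ran against in early 2019, so raw block support has to be enabled explicitly — and in both places, since the cluster-wide featureGates block and the kubelet carry separate gate lists.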
70 changes: 65 additions & 5 deletions tests/e2e/dynamic_provisioning.go
@@ -68,7 +68,7 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() {
 				},
 			},
 		}
-		test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
 			CSIDriver: ebsDriver,
 			Pods:      pods,
 		}
@@ -97,7 +97,7 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() {
 				},
 			},
 		}
-		test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
 			CSIDriver: ebsDriver,
 			Pods:      pods,
 		}
@@ -123,7 +123,7 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() {
 				},
 			},
 		}
-		test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
 			CSIDriver: ebsDriver,
 			Pods:      pods,
 		}
@@ -156,7 +156,7 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() {
 				},
 			},
 		}
-		test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
 			CSIDriver: ebsDriver,
 			Pods:      pods,
 		}
@@ -194,7 +194,67 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() {
 				},
 			},
 		}
-		test := testsuites.DynamicallyProvisionedWriterReaderVolumeTest{
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
 			CSIDriver: ebsDriver,
 			Pods:      pods,
 		}
+		test.Run(cs, ns)
+	})
+
+	It("should create a raw block volume on demand", func() {
+		pods := []testsuites.PodDetails{
+			{
+				Cmd: "dd if=/dev/zero of=/dev/xvda bs=1024k count=100",
+				Volumes: []testsuites.VolumeDetails{
+					{
+						VolumeType: awscloud.VolumeTypeGP2,
+						FSType:     ebscsidriver.FSTypeExt4,
+						ClaimSize:  driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2),
+						VolumeMode: testsuites.Block,
+						VolumeDevice: testsuites.VolumeDeviceDetails{
+							NameGenerate: "test-block-volume-",
+							DevicePath:   "/dev/xvda",
+						},
+					},
+				},
+			},
+		}
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
+			CSIDriver: ebsDriver,
+			Pods:      pods,
+		}
+		test.Run(cs, ns)
+	})
+
+	It("should create a raw block volume and a filesystem volume on demand and bind to the same pod", func() {
+		pods := []testsuites.PodDetails{
+			{
+				Cmd: "dd if=/dev/zero of=/dev/xvda bs=1024k count=100 && echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
+				Volumes: []testsuites.VolumeDetails{
+					{
+						VolumeType: awscloud.VolumeTypeIO1,
+						FSType:     ebscsidriver.FSTypeExt4,
+						ClaimSize:  driver.MinimumSizeForVolumeType(awscloud.VolumeTypeIO1),
+						VolumeMount: testsuites.VolumeMountDetails{
+							NameGenerate:      "test-volume-",
+							MountPathGenerate: "/mnt/test-",
+						},
+					},
+					{
+						VolumeType:   awscloud.VolumeTypeGP2,
+						FSType:       ebscsidriver.FSTypeExt4,
+						MountOptions: []string{"rw"},
+						ClaimSize:    driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2),
+						VolumeMode:   testsuites.Block,
+						VolumeDevice: testsuites.VolumeDeviceDetails{
+							NameGenerate: "test-block-volume-",
+							DevicePath:   "/dev/xvda",
+						},
+					},
+				},
+			},
+		}
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
+			CSIDriver: ebsDriver,
+			Pods:      pods,
+		}
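The rename from DynamicallyProvisionedWriterReaderVolumeTest to DynamicallyProvisionedCmdVolumeTest reflects that the tester no longer assumes a mounted filesystem it can write to and read from; it simply runs the pod's Cmd and checks the exit code. For the raw block cases that command is the whole assertion: with VolumeMode: Block there is no filesystem to mount, so dd writes 100 MiB of zeros directly to the device at /dev/xvda, and any failure surfaces as a non-zero exit. The combined test additionally round-trips a string through the mounted ext4 volume at /mnt/test-1.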
9 changes: 4 additions & 5 deletions tests/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go
@@ -16,22 +16,21 @@ package testsuites

 import (
 	"github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver"
 
 	"k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 
 	. "github.com/onsi/ginkgo"
 )
 
-// DynamicallyProvisionedWriterReaderVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s)
+// DynamicallyProvisionedCmdVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s)
 // Waiting for the PV provisioner to create a new PV
-// Testing if the Pod(s) can write and read to mounted volumes
-type DynamicallyProvisionedWriterReaderVolumeTest struct {
+// Testing if the Pod(s) Cmd is run with a 0 exit code
+type DynamicallyProvisionedCmdVolumeTest struct {
 	CSIDriver driver.DynamicPVTestDriver
 	Pods      []PodDetails
 }
 
-func (t *DynamicallyProvisionedWriterReaderVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedCmdVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
 	for _, pod := range t.Pods {
 		tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver)
 		// defer must be called here for resources not get removed before using them
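The rest of Run is collapsed in the diff above. A minimal sketch of the likely remainder, assuming it follows the same deploy-and-wait pattern as the repo's other testers — tpod.Create, tpod.Cleanup, and tpod.WaitForSuccess are assumed helper names, not confirmed by this diff:

```go
		// Sketch of the elided loop body (assumed helpers).
		for i := range cleanup {
			defer cleanup[i]()
		}

		By("deploying the pod")
		tpod.Create()
		defer tpod.Cleanup()
		By("checking that the pod's command exits with no error")
		tpod.WaitForSuccess() // assumed: polls until the pod reaches the Succeeded phase
	}
}
```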
32 changes: 27 additions & 5 deletions tests/e2e/testsuites/specs.go
@@ -40,25 +40,43 @@ type VolumeDetails struct {
 	ReclaimPolicy         *v1.PersistentVolumeReclaimPolicy
 	VolumeBindingMode     *storagev1.VolumeBindingMode
 	AllowedTopologyValues []string
+	VolumeMode            VolumeMode
 	VolumeMount           VolumeMountDetails
+	VolumeDevice          VolumeDeviceDetails
 	// Optional, used with pre-provisioned volumes
 	VolumeID string
 }
 
+type VolumeMode int
+
+const (
+	FileSystem VolumeMode = iota
+	Block
+)
+
 type VolumeMountDetails struct {
 	NameGenerate      string
 	MountPathGenerate string
 	ReadOnly          bool
 }
 
+type VolumeDeviceDetails struct {
+	NameGenerate string
+	DevicePath   string
+}

 func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestPod, []func()) {
 	tpod := NewTestPod(client, namespace, pod.Cmd)
 	cleanupFuncs := make([]func(), 0)
 	for n, v := range pod.Volumes {
 		tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver)
 		cleanupFuncs = append(cleanupFuncs, funcs...)
 
-		tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
+		if v.VolumeMode == Block {
+			tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath)
+		} else {
+			tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
+		}
 	}
 	return tpod, cleanupFuncs
 }
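Because FileSystem is declared first with iota, it is the zero value of VolumeMode. Every pre-existing VolumeDetails literal omits the field, so those tests keep taking the SetupVolume (filesystem mount) branch unchanged; only volumes that opt in with VolumeMode: Block are wired through SetupRawBlockVolume.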
@@ -70,7 +88,11 @@ func (pod *PodDetails) SetupWithPreProvisionedVolumes(client clientset.Interface
 		tpvc, funcs := v.SetupPreProvisionedPersistentVolumeClaim(client, namespace, csiDriver)
 		cleanupFuncs = append(cleanupFuncs, funcs...)
 
-		tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
+		if v.VolumeMode == Block {
+			tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath)
+		} else {
+			tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
+		}
 	}
 	return tpod, cleanupFuncs
 }
@@ -84,7 +106,7 @@ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1
 	createdStorageClass := tsc.Create()
 	cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
 	By("setting up the PVC")
-	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, &createdStorageClass)
+	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
 	tpvc.Create()
 	tpvc.WaitForBound()
 	tpvc.ValidateProvisionedPersistentVolume()
@@ -104,7 +126,7 @@ func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.
 	createdStorageClass := tsc.Create()
 	cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
 	By("setting up the PVC and PV")
-	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, &createdStorageClass)
+	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
 	tpvc.Create()
 	cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
 	// PV will not be ready until PVC is used in a pod when volumeBindingMode: WaitForFirstConsumer
@@ -123,7 +145,7 @@ func (volume *VolumeDetails) SetupPreProvisionedPersistentVolumeClaim(client cli
 	tpv := NewTestPreProvisionedPersistentVolume(client, pv)
 	tpv.Create()
 	By("setting up the PVC")
-	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, nil)
+	tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, nil)
 	tpvc.Create()
 	cleanupFuncs = append(cleanupFuncs, tpvc.DeleteBoundPersistentVolume)
 	cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
31 changes: 28 additions & 3 deletions tests/e2e/testsuites/testsuites.go
@@ -95,17 +95,23 @@ func (pv *TestPreProvisionedPersistentVolume) Create() v1.PersistentVolume {
 type TestPersistentVolumeClaim struct {
 	client                         clientset.Interface
 	claimSize                      string
+	volumeMode                     v1.PersistentVolumeMode
 	storageClass                   *storagev1.StorageClass
 	namespace                      *v1.Namespace
 	persistentVolume               *v1.PersistentVolume
 	persistentVolumeClaim          *v1.PersistentVolumeClaim
 	requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
 }

-func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
+func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
+	mode := v1.PersistentVolumeFilesystem
+	if volumeMode == Block {
+		mode = v1.PersistentVolumeBlock
+	}
 	return &TestPersistentVolumeClaim{
 		client:       c,
 		claimSize:    claimSize,
+		volumeMode:   mode,
 		namespace:    ns,
 		storageClass: sc,
 	}
@@ -119,7 +125,7 @@ func (t *TestPersistentVolumeClaim) Create() {
 	if t.storageClass != nil {
 		storageClassName = t.storageClass.Name
 	}
-	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize)
+	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode)
 	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(t.requestedPersistentVolumeClaim)
 	framework.ExpectNoError(err)
 }
@@ -139,7 +145,7 @@ func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
 	return *t.persistentVolumeClaim
 }
 
-func generatePVC(namespace, storageClassName, claimSize string) *v1.PersistentVolumeClaim {
+func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.PersistentVolumeMode) *v1.PersistentVolumeClaim {
 	return &v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "pvc-",
@@ -155,6 +161,7 @@ func generatePVC(namespace, storageClassName, claimSize string) *v1.PersistentVolumeClaim {
 					v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
 				},
 			},
+			VolumeMode: &volumeMode,
 		},
 	}
 }
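The net effect is that a Block request now reaches the API server as spec.volumeMode: Block on the claim. A self-contained sketch of the object generatePVC builds in that case — the access mode and 4Gi size are illustrative assumptions, since the middle of the function is elided in this diff:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	volumeMode := v1.PersistentVolumeBlock
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "pvc-"},
		Spec: v1.PersistentVolumeClaimSpec{
			// AccessModes is assumed; the diff elides this part of generatePVC.
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("4Gi"),
				},
			},
			// Omitting VolumeMode lets the API default to Filesystem,
			// which is why existing tests keep their behavior.
			VolumeMode: &volumeMode,
		},
	}
	fmt.Println(*pvc.Spec.VolumeMode) // Block
}
```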
@@ -455,6 +462,24 @@ func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath str
 	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
 }
 
+func (t *TestPod) SetupRawBlockVolume(pvc *v1.PersistentVolumeClaim, name, devicePath string) {
+	volumeDevice := v1.VolumeDevice{
+		Name:       name,
+		DevicePath: devicePath,
+	}
+	t.pod.Spec.Containers[0].VolumeDevices = append(t.pod.Spec.Containers[0].VolumeDevices, volumeDevice)
+
+	volume := v1.Volume{
+		Name: name,
+		VolumeSource: v1.VolumeSource{
+			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+				ClaimName: pvc.Name,
+			},
+		},
+	}
+	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
+}
+
 func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) {
 	t.pod.Spec.NodeSelector = nodeSelector
 }
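SetupRawBlockVolume mirrors SetupVolume's pod-level wiring — the claim still appears under spec.volumes — but the container consumes it through volumeDevices with a devicePath instead of volumeMounts with a mountPath. A self-contained sketch of the pod spec it effectively produces; the image and claim name are illustrative:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
)

// buildRawBlockPod shows the shape SetupRawBlockVolume wires up: the claim is
// attached under spec.volumes, and the container consumes it via volumeDevices
// (a devicePath) rather than volumeMounts (a mountPath).
func buildRawBlockPod(claimName string) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "volume-tester",
				Image:   "busybox", // illustrative image
				Command: []string{"/bin/sh", "-c", "dd if=/dev/zero of=/dev/xvda bs=1024k count=100"},
				VolumeDevices: []v1.VolumeDevice{{
					Name:       "test-block-volume-1",
					DevicePath: "/dev/xvda",
				}},
			}},
			Volumes: []v1.Volume{{
				Name: "test-block-volume-1",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
						ClaimName: claimName,
					},
				},
			}},
		},
	}
}
```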
