e2e/storage: test usage of volume in multiple pods at once
This is a special case that both kubelet and the volume driver should
support, because users might expect it. One Kubernetes mechanism to
deploy pods like this is via pod affinity.
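
For illustration only (not part of this commit), a minimal sketch of such an
affinity constraint in Go; the package name and helper function are
hypothetical, and the test diff below builds the same structure inline:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sameNodeAffinity forces a pod onto the node (topology key
// kubernetes.io/hostname) where a pod labeled app=pvc-tester-first
// is already running.
func sameNodeAffinity() *v1.Affinity {
	return &v1.Affinity{
		PodAffinity: &v1.PodAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "pvc-tester-first"},
				},
				TopologyKey: "kubernetes.io/hostname",
			}},
		},
	}
}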

However, strictly speaking the CSI spec does not allow this usage
mode (see container-storage-interface/spec#150) and
there is an on-going debate to enable it (see
container-storage-interface/spec#178). Therefore
this test gets skipped unless explicitly enabled for a driver.

CSI drivers which create a block device for a remote volume in
NodePublishVolume fail this test. Such drivers have to make the volume
available in NodeStageVolume and then merely do a bind mount in
NodePublishVolume (as for example in
https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/pkg/gce-pd-csi-driver/node.go#L150).
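
A minimal sketch of that split, assuming the device path can be derived from
the volume ID and eliding mount options and error details; nodeServer and the
mount commands are illustrative, not this commit's code:

package node

import (
	"context"
	"os/exec"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

type nodeServer struct{} // other csi.NodeServer methods omitted

// NodeStageVolume mounts the remote block device once per node at the
// staging path, so later publish calls can share it.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	device := "/dev/disk/by-id/" + req.GetVolumeId() // assumption: device discoverable via volume ID
	if err := exec.CommandContext(ctx, "mount", device, req.GetStagingTargetPath()).Run(); err != nil {
		return nil, err
	}
	return &csi.NodeStageVolumeResponse{}, nil
}

// NodePublishVolume merely bind-mounts the staged volume into each pod's
// target path; several pods on the node can then use it concurrently.
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	if err := exec.CommandContext(ctx, "mount", "--bind", req.GetStagingTargetPath(), req.GetTargetPath()).Run(); err != nil {
		return nil, err
	}
	return &csi.NodePublishVolumeResponse{}, nil
}

Because the device is mounted once in NodeStageVolume, each additional pod
only adds a bind mount, which is why this layout passes the new test.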
pohly committed Feb 12, 2019
1 parent ca42cf4 commit 03d352f
Showing 3 changed files with 57 additions and 2 deletions.
test/e2e/storage/drivers/csi.go (2 additions & 2 deletions)

@@ -84,7 +84,7 @@ var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
 // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
 func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 	return initHostPathCSIDriver("csi-hostpath", config,
-		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true},
+		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -259,7 +259,7 @@ func (m *mockCSIDriver) CleanupDriver() {
 // InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
 func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 	return initHostPathCSIDriver("csi-hostpath-v0", config,
-		map[testsuites.Capability]bool{testsuites.CapPersistence: true},
+		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
test/e2e/storage/testsuites/provisioning.go (48 additions)

@@ -18,6 +18,7 @@ package testsuites
 
 import (
 	"fmt"
+	"sync"
 	"time"
 
 	. "github.com/onsi/ginkgo"
@@ -245,6 +246,50 @@ func testProvisioning(input *provisioningTestInput) {
 		}
 		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
 	})
+
+	It("should allow concurrent writes on the same node", func() {
+		if !input.dInfo.Capabilities[CapMultiPODs] {
+			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
+		}
+		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			// We start two pods concurrently on the same node,
+			// using the same PVC. Both wait for the other to create
+			// a file before returning. The pods are forced onto the
+			// same node via pod affinity.
+			wg := sync.WaitGroup{}
+			wg.Add(2)
+			firstPodName := "pvc-tester-first"
+			secondPodName := "pvc-tester-second"
+			run := func(podName, command string) {
+				defer GinkgoRecover()
+				defer wg.Done()
+				node := NodeSelection{
+					Name: input.nodeName,
+				}
+				if podName == secondPodName {
+					node.Affinity = &v1.Affinity{
+						PodAffinity: &v1.PodAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+								{LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										// Set by RunInPodWithVolume.
+										"app": firstPodName,
+									},
+								},
+									TopologyKey: "kubernetes.io/hostname",
+								},
+							},
+						},
+					}
+				}
+				RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
+			}
+			go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
+			go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
+			wg.Wait()
+		}
+		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+	})
 }
 
 // TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
@@ -561,6 +606,9 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: podName + "-",
+			Labels: map[string]string{
+				"app": podName,
+			},
 		},
 		Spec: v1.PodSpec{
 			NodeName: node.Name,
test/e2e/storage/testsuites/testdriver.go (7 additions)

@@ -97,6 +97,13 @@ const (
 	CapFsGroup    Capability = "fsGroup"    // volume ownership via fsGroup
 	CapExec       Capability = "exec"       // exec a file in the volume
 	CapDataSource Capability = "dataSource" // support populate data from snapshot
+
+	// multiple pods on a node can use the same volume concurrently;
+	// for CSI, see:
+	// - https://github.com/container-storage-interface/spec/pull/150
+	// - https://github.com/container-storage-interface/spec/issues/178
+	// - NodeStageVolume in the spec
+	CapMultiPODs Capability = "multipods"
 )
 
 // DriverInfo represents a combination of parameters to be used in implementation of TestDriver
