Skip to content

Commit

Permalink
csi: fix filesystem write paths in e2e tests (#7361)
Browse files Browse the repository at this point in the history
The writer jobs for the CSI e2e tests will write to locations in the
alloc dir and not arbitrary locations at the root. This changeset
mounts the CSI volume at a location within the alloc dir, uses the
alloc ID as a better-namespaced identifier of the file we write, and
corrects the shell used by the busybox container.
  • Loading branch information
tgross committed Mar 17, 2020
1 parent e8d3bf6 commit c2143b1
Show file tree
Hide file tree
Showing 4 changed files with 72 additions and 35 deletions.
91 changes: 64 additions & 27 deletions e2e/csi/csi.go
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
package csi

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"time"

"github.com/hashicorp/nomad/api"
Expand Down Expand Up @@ -35,46 +38,47 @@ type volumeConfig struct {
}

func (tc *CSIVolumesTest) BeforeAll(f *framework.F) {

t := f.T()
// The volume IDs come from the external provider, so we need
// to read the configuration out of our Terraform output.
rawjson, err := ioutil.ReadFile("csi/input/volumes.json")
if err != nil {
f.T().Skip("volume ID configuration not found, try running 'terraform output volumes > ../csi/input/volumes.json'")
t.Skip("volume ID configuration not found, try running 'terraform output volumes > ../csi/input/volumes.json'")
}
volumeIDs := &volumeConfig{}
err = json.Unmarshal(rawjson, volumeIDs)
if err != nil {
f.T().Fatal("volume ID configuration could not be read")
t.Fatal("volume ID configuration could not be read")
}

tc.volumeIDs = volumeIDs

// Ensure cluster has leader and at least two client
// nodes in a ready state before running tests
e2eutil.WaitForLeader(f.T(), tc.Nomad())
e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2)
e2eutil.WaitForLeader(t, tc.Nomad())
e2eutil.WaitForNodesReady(t, tc.Nomad(), 2)
}

// TestEBSVolumeClaim launches AWS EBS plugins and registers an EBS volume
// as a Nomad CSI volume. We then deploy a job that writes to the volume,
// stop that job, and reuse the volume for another job which should be able
// to read the data written by the first job.
func (tc *CSIVolumesTest) TestEBSVolumeClaim(f *framework.F) {
require := require.New(f.T())
t := f.T()
require := require.New(t)
nomadClient := tc.Nomad()
uuid := uuid.Generate()

// deploy the controller plugin job
controllerJobID := "aws-ebs-plugin-controller" + uuid[0:8]
controllerJobID := "aws-ebs-plugin-controller-" + uuid[0:8]
tc.jobIds = append(tc.jobIds, controllerJobID)
e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/plugin-aws-ebs-controller.nomad", controllerJobID, "")

// deploy the node plugins job
nodesJobID := "aws-ebs-plugin-nodes" + uuid[0:8]
nodesJobID := "aws-ebs-plugin-nodes-" + uuid[0:8]
tc.jobIds = append(tc.jobIds, nodesJobID)
e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/plugin-aws-ebs-nodes.nomad", nodesJobID, "")

// wait for plugin to become healthy
Expand Down Expand Up @@ -106,25 +110,40 @@ func (tc *CSIVolumesTest) TestEBSVolumeClaim(f *framework.F) {
defer nomadClient.CSIVolumes().Deregister(volID, nil)

// deploy a job that writes to the volume
writeJobID := "write-" + uuid[0:8]
e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
writeJobID := "write-ebs-" + uuid[0:8]
tc.jobIds = append(tc.jobIds, writeJobID)
writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/use-ebs-volume.nomad", writeJobID, "")
writeAllocID := writeAllocs[0].ID
e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID)

// read data from volume and assert the writer wrote a file to it
writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil)
require.NoError(err)
expectedPath := "/local/test/" + writeAllocID
_, err = readFile(nomadClient, writeAlloc, expectedPath)
require.NoError(err)

// Shutdown the writer so we can run a reader.
// we could mount the EBS volume with multi-attach, but we
// want this test to exercise the unpublish workflow.
nomadClient.Jobs().Deregister(writeJobID, true, nil)

// deploy a job so we can read from the volume
readJobID := "read-" + uuid[0:8]
allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
readJobID := "read-ebs-" + uuid[0:8]
tc.jobIds = append(tc.jobIds, readJobID)
readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/use-ebs-volume.nomad", readJobID, "")
readAllocID := readAllocs[0].ID
e2eutil.WaitForAllocRunning(t, nomadClient, readAllocID)

// ensure we clean up claim before we deregister volumes
defer nomadClient.Jobs().Deregister(readJobID, true, nil)

// read data from volume and assert the writer wrote a file to it
alloc, _, err := nomadClient.Allocations().Info(allocs[0].ID, nil)
readAlloc, _, err := nomadClient.Allocations().Info(readAllocID, nil)
require.NoError(err)
expectedPath := "/test/" + writeJobID
_, _, err = nomadClient.AllocFS().Stat(alloc, expectedPath, nil)
_, err = readFile(nomadClient, readAlloc, expectedPath)
require.NoError(err)
}

Expand All @@ -133,14 +152,15 @@ func (tc *CSIVolumesTest) TestEBSVolumeClaim(f *framework.F) {
// and share the volume with another job which should be able to read the
// data written by the first job.
func (tc *CSIVolumesTest) TestEFSVolumeClaim(f *framework.F) {
require := require.New(f.T())
t := f.T()
require := require.New(t)
nomadClient := tc.Nomad()
uuid := uuid.Generate()

// deploy the node plugins job (no need for a controller for EFS)
nodesJobID := "aws-efs-plugin-nodes" + uuid[0:8]
nodesJobID := "aws-efs-plugin-nodes-" + uuid[0:8]
tc.jobIds = append(tc.jobIds, nodesJobID)
e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/plugin-aws-efs-nodes.nomad", nodesJobID, "")

// wait for plugin to become healthy
Expand Down Expand Up @@ -172,24 +192,26 @@ func (tc *CSIVolumesTest) TestEFSVolumeClaim(f *framework.F) {
defer nomadClient.CSIVolumes().Deregister(volID, nil)

// deploy a job that writes to the volume
writeJobID := "write-" + uuid[0:8]
e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
writeJobID := "write-efs-" + uuid[0:8]
writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/use-efs-volume-write.nomad", writeJobID, "")
defer nomadClient.Jobs().Deregister(writeJobID, true, nil)
e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocs[0].ID)

// deploy a job that reads from the volume. we don't stop the
// writer job in this case because we want to exercise
// a multiple-access mode
readJobID := "read-" + uuid[0:8]
allocs := e2eutil.RegisterAndWaitForAllocs(f.T(), nomadClient,
readJobID := "read-efs-" + uuid[0:8]
readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient,
"csi/input/use-efs-volume-read.nomad", readJobID, "")
defer nomadClient.Jobs().Deregister(readJobID, true, nil)
e2eutil.WaitForAllocRunning(t, nomadClient, readAllocs[0].ID)

// read data from volume and assert the writer wrote a file to it
alloc, _, err := nomadClient.Allocations().Info(allocs[0].ID, nil)
readAlloc, _, err := nomadClient.Allocations().Info(readAllocs[0].ID, nil)
require.NoError(err)
expectedPath := "/test/" + writeJobID
_, _, err = nomadClient.AllocFS().Stat(alloc, expectedPath, nil)
expectedPath := "/local/test/" + writeAllocs[0].ID
_, err = readFile(nomadClient, readAlloc, expectedPath)
require.NoError(err)
}

Expand All @@ -203,3 +225,18 @@ func (tc *CSIVolumesTest) AfterEach(f *framework.F) {
// Garbage collect
nomadClient.System().GarbageCollect()
}

// TODO(tgross): replace this w/ AllocFS().Stat() after
// https://github.com/hashicorp/nomad/issues/7365 is fixed

// readFile reads the file at path from inside the "task" task of the
// given allocation by exec'ing `cat`, returning the file's contents
// on stdout. It returns an error if the exec transport fails or if
// `cat` exits non-zero (e.g. the file does not exist), so callers can
// distinguish a missing file from an empty one.
func readFile(client *api.Client, alloc *api.Allocation, path string) (bytes.Buffer, error) {
	// Bound the exec so a wedged task can't hang the test suite.
	ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelFn()

	var stdout, stderr bytes.Buffer
	exitCode, err := client.Allocations().Exec(ctx,
		alloc, "task", false,
		[]string{"cat", path},
		os.Stdin, &stdout, &stderr,
		make(chan api.TerminalSize), nil)
	if err != nil {
		return stdout, err
	}
	// Exec reports command failure via the exit code, not err; ignoring
	// it would make a missing file indistinguishable from success.
	if exitCode != 0 {
		return stdout, fmt.Errorf("reading %q failed (exit %d): %s",
			path, exitCode, stderr.String())
	}
	return stdout, nil
}
6 changes: 3 additions & 3 deletions e2e/csi/input/use-ebs-volume.nomad
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@ job "use-ebs-volume" {

config {
image = "busybox:1"
command = "bash"
args = ["-c", "touch /test/${NOMAD_JOB_NAME}; sleep 3600"]
command = "/bin/sh"
args = ["-c", "touch /local/test/${NOMAD_ALLOC_ID}; sleep 3600"]
}

volume_mount {
volume = "test"
destination = "/test"
destination = "${NOMAD_TASK_DIR}/test"
read_only = false
}

Expand Down
4 changes: 2 additions & 2 deletions e2e/csi/input/use-efs-volume-read.nomad
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,13 @@ job "use-efs-volume" {

config {
image = "busybox:1"
command = "bash"
command = "/bin/sh"
args = ["-c", "sleep 3600"]
}

volume_mount {
volume = "test"
destination = "/test"
destination = "${NOMAD_TASK_DIR}/test"
read_only = true
}

Expand Down
6 changes: 3 additions & 3 deletions e2e/csi/input/use-efs-volume-write.nomad
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@ job "use-efs-volume" {

config {
image = "busybox:1"
command = "bash"
args = ["-c", "touch /test/${NOMAD_JOB_NAME}; sleep 3600"]
command = "/bin/sh"
args = ["-c", "touch /local/test/${NOMAD_ALLOC_ID}; sleep 3600"]
}

volume_mount {
volume = "test"
destination = "/test"
destination = "${NOMAD_TASK_DIR}/test"
read_only = false
}

Expand Down

0 comments on commit c2143b1

Please sign in to comment.