From 314b734e98bf26431660e63fa9029086379ec1b3 Mon Sep 17 00:00:00 2001
From: Martin Hrabovcin
Date: Mon, 4 Jul 2016 22:15:36 +0200
Subject: [PATCH] Add EFS storage driver

---
 .docs/user-guide/storage-providers.md        |  90 +++
 drivers/storage/efs/efs.go                   |  24 +
 drivers/storage/efs/executor/efs_executor.go | 157 +++++
 drivers/storage/efs/storage/efs_storage.go   | 594 +++++++++++++++++++
 drivers/storage/efs/tests/efs_test.go        | 301 ++++++++++
 glide.lock                                   |   2 +
 glide.yaml                                   |   6 +
 imports/executors/imports_executor.go        |   1 +
 imports/remote/imports_remote.go             |   1 +
 9 files changed, 1176 insertions(+)
 create mode 100644 drivers/storage/efs/efs.go
 create mode 100644 drivers/storage/efs/executor/efs_executor.go
 create mode 100644 drivers/storage/efs/storage/efs_storage.go
 create mode 100644 drivers/storage/efs/tests/efs_test.go

diff --git a/.docs/user-guide/storage-providers.md b/.docs/user-guide/storage-providers.md
index c257fa82..86e3b018 100644
--- a/.docs/user-guide/storage-providers.md
+++ b/.docs/user-guide/storage-providers.md
@@ -304,3 +304,93 @@ libstorage:
 - Snapshot and create volume from volume functionality is not available yet
   with this driver.
 - The driver supports VirtualBox 5.0.10+
+
+## AWS EFS
+The AWS EFS driver registers a storage driver named `efs` with the
+`libStorage` driver manager and is used to connect to and manage AWS Elastic
+File Systems.
+
+### Requirements
+
+* AWS account
+* VPC - EFS can only be accessed from within a VPC
+* AWS credentials
+
+### Configuration
+The following is an example with all possible fields configured. For a running
+example see the `Examples` section.
+
+```yaml
+efs:
+  accessKey: XXXXXXXXXX
+  secretKey: XXXXXXXXXX
+  securityGroups: sg-XXXXXXX,sg-XXXXXX0,sg-XXXXXX1
+  region: us-east-1
+  tag: test
+```
+
+#### Configuration Notes
+- The `accessKey` and `secretKey` configuration parameters are optional and
+should be used only when explicit AWS credentials must be provided. The EFS
+driver uses the official AWS SDK for Go and therefore also supports all of
+its other credential sources, such as environment variables or an instance
+profile's IAM role.
+- `region` is the AWS region in which EFS file systems are provisioned. See
+the official AWS documentation for the list of regions where EFS is
+available.
+- `securityGroups` is a comma-separated list of security groups attached to
+each `MountPoint` the driver creates. If no security groups are provided,
+the default VPC security group is used.
+- `tag` is used to partition multiple services within a single AWS account
+and is used as a prefix for EFS names in the format
+`[tagprefix]/volumeName`.
+
+For information on the equivalent environment variable and CLI flag names
+please see the section on how non top-level configuration properties are
+[transformed](./config.md#configuration-properties).
+
+### Runtime Behavior
+The AWS EFS storage driver creates one EFS FileSystem per volume and exposes
+the root of that file system as an NFS mount point. Volumes are not attached
+to instances directly, but rather exposed to each subnet by creating a
+`MountPoint` in the corresponding VPC subnet. When a volume is detached from
+an instance, no action is taken, because there is no reliable way to tell
+whether other instances in the same subnet are still using the `MountPoint`
+being detached. Since there is no charge for a `MountPoint`, they are removed
+only once the whole volume is deleted.
+
+By default all EFS file systems are provisioned with the `generalPurpose`
+performance mode. The `maxIO` performance mode can be selected by passing
+`maxIO` as the `volumetype`.
+
+**NOTE**: Each EFS FileSystem can be accessed from only a single VPC at a
+time.
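+
+As a sketch of the behavior described above, the snippet below mirrors the
+naming and provisioning logic of the storage driver added later in this
+patch: the configured `tag` is prepended to the requested volume name, and a
+`volumetype` of `maxIO` switches the file system to the maxIO performance
+mode. The helper names `fullVolumeName` and `createInput` are illustrative
+only and are not part of the driver's API.
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	awsefs "github.com/aws/aws-sdk-go/service/efs"
+)
+
+// fullVolumeName prefixes the requested name with the configured tag,
+// e.g. tag "test" + name "data" -> "test/data".
+func fullVolumeName(tag, name string) string {
+	return tag + "/" + name
+}
+
+// createInput builds a CreateFileSystem request; "maxio" (case-insensitive)
+// selects the maxIO performance mode, anything else uses generalPurpose.
+func createInput(name, volumeType string) *awsefs.CreateFileSystemInput {
+	mode := awsefs.PerformanceModeGeneralPurpose
+	if strings.ToLower(volumeType) == "maxio" {
+		mode = awsefs.PerformanceModeMaxIo
+	}
+	return &awsefs.CreateFileSystemInput{
+		CreationToken:   aws.String(name),
+		PerformanceMode: aws.String(mode),
+	}
+}
+
+func main() {
+	fmt.Println(fullVolumeName("test", "data"))                // test/data
+	fmt.Println(*createInput("data", "maxIO").PerformanceMode) // maxIO
+}
+```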
+
+### Activating the Driver
+To activate the AWS EFS driver please follow the instructions for
+[activating storage drivers](./config.md#storage-drivers),
+using `efs` as the driver name.
+
+### Troubleshooting
+- Make sure that the AWS credentials (user or role) have the following AWS permissions:
+  - `elasticfilesystem:CreateFileSystem`
+  - `elasticfilesystem:CreateMountTarget`
+  - `ec2:DescribeSubnets`
+  - `ec2:DescribeNetworkInterfaces`
+  - `ec2:CreateNetworkInterface`
+  - `elasticfilesystem:CreateTags`
+  - `elasticfilesystem:DeleteFileSystem`
+  - `elasticfilesystem:DeleteMountTarget`
+  - `ec2:DeleteNetworkInterface`
+  - `elasticfilesystem:DescribeFileSystems`
+  - `elasticfilesystem:DescribeMountTargets`
+
+### Examples
+Below is a working `config.yml` file for use with AWS EFS.
+
+```yaml
+libstorage:
+  server:
+    services:
+      efs:
+        driver: efs
+        efs:
+          accessKey: XXXXXXXXXX
+          secretKey: XXXXXXXXXX
+          securityGroups: sg-XXXXXXX,sg-XXXXXX0,sg-XXXXXX1
+          region: us-east-1
+          tag: test
+```
\ No newline at end of file
diff --git a/drivers/storage/efs/efs.go b/drivers/storage/efs/efs.go
new file mode 100644
index 00000000..3f77bd58
--- /dev/null
+++ b/drivers/storage/efs/efs.go
@@ -0,0 +1,24 @@
+package efs
+
+import (
+	"github.com/akutz/gofig"
+)
+
+const (
+	// Name is the provider's name.
+	Name = "efs"
+)
+
+func init() {
+	registerConfig()
+}
+
+func registerConfig() {
+	r := gofig.NewRegistration("EFS")
+	r.Key(gofig.String, "", "", "", "efs.accessKey")
+	r.Key(gofig.String, "", "", "", "efs.secretKey")
+	r.Key(gofig.String, "", "", "Comma separated security group ids", "efs.securityGroups")
+	r.Key(gofig.String, "", "", "AWS region", "efs.region")
+	r.Key(gofig.String, "", "", "Tag prefix for EFS naming", "efs.tag")
+	gofig.Register(r)
+}
diff --git a/drivers/storage/efs/executor/efs_executor.go b/drivers/storage/efs/executor/efs_executor.go
new file mode 100644
index 00000000..94d432ff
--- /dev/null
+++ b/drivers/storage/efs/executor/efs_executor.go
@@ -0,0 +1,157 @@
+package executor
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/akutz/gofig"
+	"github.com/akutz/goof"
+
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+
+	"github.com/emccode/libstorage/api/registry"
+	"github.com/emccode/libstorage/api/types"
+	"github.com/emccode/libstorage/drivers/storage/efs"
+)
+
+// driver is the storage executor for the efs storage driver.
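+// It reports the EC2 instance's subnet ID (looked up through the EC2
+// instance metadata service) as the InstanceID, and it builds LocalDevices
+// by parsing /proc/self/mountinfo, so mounted EFS exports are keyed by
+// their NFS source in the "ip:/" form.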
+type driver struct { + config gofig.Config +} + +const ( + idDelimiter = "/" + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +func init() { + registry.RegisterStorageExecutor(efs.Name, newDriver) +} + +func newDriver() types.StorageExecutor { + return &driver{} +} + +func (d *driver) Init(ctx types.Context, config gofig.Config) error { + d.config = config + return nil +} + +func (d *driver) Name() string { + return efs.Name +} + +// InstanceID returns the local instance ID for the test +func InstanceID() (*types.InstanceID, error) { + return newDriver().InstanceID(nil, nil) +} + +// InstanceID returns the aws instance configuration +func (d *driver) InstanceID( + ctx types.Context, + opts types.Store) (*types.InstanceID, error) { + + svc := ec2metadata.New(session.New()) + if !svc.Available() { + return nil, goof.New("EC2Metadata service not available") + } + + mac, err := svc.GetMetadata("mac") + if err != nil { + return nil, goof.WithError("no ec2metadata mac address", err) + } + + subnetID, err := svc.GetMetadata(fmt.Sprintf("network/interfaces/macs/%s/subnet-id", mac)) + if err != nil { + return nil, goof.WithError("no ec2metadata subnet id", err) + } + + iid := &types.InstanceID{Driver: efs.Name} + if err := iid.MarshalMetadata(subnetID); err != nil { + return nil, err + } + + return iid, nil +} + +func (d *driver) NextDevice( + ctx types.Context, + opts types.Store) (string, error) { + return "", types.ErrNotImplemented +} + +func (d *driver) LocalDevices( + ctx types.Context, + opts *types.LocalDevicesOpts) (*types.LocalDevices, error) { + + mtt, err := parseMountTable() + if err != nil { + return nil, err + } + + // TODO(mhrabovcin): Filter out only AWS NFS mounts? + idmnt := make(map[string]string) + for _, mt := range mtt { + idmnt[mt.Source] = mt.MountPoint + } + + return &types.LocalDevices{ + Driver: efs.Name, + DeviceMap: idmnt, + }, nil +} + +func parseMountTable() ([]*types.MountInfo, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*types.MountInfo, error) { + var ( + s = bufio.NewScanner(r) + out = []*types.MountInfo{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &types.MountInfo{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.MountPoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
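+		// For reference, a /proc/self/mountinfo line looks like (proc(5)):
+		//   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+		// Everything after the " - " separator is the filesystem type, the
+		// mount source and the per-superblock options, which are extracted
+		// below.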
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.FSType = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VFSOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} diff --git a/drivers/storage/efs/storage/efs_storage.go b/drivers/storage/efs/storage/efs_storage.go new file mode 100644 index 00000000..1add7e26 --- /dev/null +++ b/drivers/storage/efs/storage/efs_storage.go @@ -0,0 +1,594 @@ +package storage + +import ( + "errors" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/akutz/gofig" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + awsefs "github.com/aws/aws-sdk-go/service/efs" + + "github.com/emccode/libstorage/api/context" + "github.com/emccode/libstorage/api/registry" + "github.com/emccode/libstorage/api/types" + "github.com/emccode/libstorage/drivers/storage/efs" +) + +const ( + tagDelimiter = "/" +) + +// Driver represents a EFS driver implementation of StorageDriver +type driver struct { + config gofig.Config + awsCreds *credentials.Credentials +} + +func init() { + registry.RegisterStorageDriver(efs.Name, newDriver) +} + +func newDriver() types.StorageDriver { + return &driver{} +} + +// Name returns the name of the driver +func (d *driver) Name() string { + return efs.Name +} + +// Init initializes the driver. +func (d *driver) Init(ctx types.Context, config gofig.Config) error { + d.config = config + + fields := log.Fields{ + "accessKey": d.accessKey(), + "secretKey": d.secretKey(), + "region": d.region(), + "tag": d.tag(), + } + + if d.accessKey() == "" { + fields["accessKey"] = "" + } else { + fields["accessKey"] = "******" + } + + if d.secretKey() == "" { + fields["secretKey"] = "" + } else { + fields["secretKey"] = "******" + } + + d.awsCreds = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.StaticProvider{Value: credentials.Value{AccessKeyID: d.accessKey(), SecretAccessKey: d.secretKey()}}, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New()), + }, + }) + + log.WithFields(fields).Info("storage driver initialized") + return nil +} + +// InstanceInspect returns an instance. +func (d *driver) InstanceInspect( + ctx types.Context, + opts types.Store) (*types.Instance, error) { + + iid := context.MustInstanceID(ctx) + if iid.ID != "" { + return &types.Instance{InstanceID: iid}, nil + } + + var awsSubnetID string + if err := iid.UnmarshalMetadata(&awsSubnetID); err != nil { + return nil, err + } + instanceID := &types.InstanceID{ID: awsSubnetID, Driver: d.Name()} + + return &types.Instance{InstanceID: instanceID}, nil +} + +// Type returns the type of storage a driver provides +func (d *driver) Type(ctx types.Context) (types.StorageType, error) { + return types.NAS, nil +} + +// NextDeviceInfo returns the information about the driver's next available +// device workflow. 
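+// EFS volumes are NFS file systems rather than block devices, so there is
+// no next-device workflow to describe; the method simply returns nil.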
+func (d *driver) NextDeviceInfo( + ctx types.Context) (*types.NextDeviceInfo, error) { + return nil, nil +} + +// Volumes returns all volumes or a filtered list of volumes. +func (d *driver) Volumes( + ctx types.Context, + opts *types.VolumesOpts) ([]*types.Volume, error) { + + fileSystems, err := d.getAllFileSystems() + if err != nil { + return nil, err + } + + var volumesSD []*types.Volume + for _, fileSystem := range fileSystems { + // Only volumes with partition prefix + if !strings.HasPrefix(*fileSystem.Name, d.tag()+tagDelimiter) { + continue + } + + // Only volumes in "available" state + if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable { + continue + } + + volumeSD := &types.Volume{ + Name: d.getPrintableName(*fileSystem.Name), + ID: *fileSystem.FileSystemId, + Size: *fileSystem.SizeInBytes.Value, + Attachments: nil, + } + + var atts []*types.VolumeAttachment + if opts.Attachments { + atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId) + if err != nil { + return nil, err + } + } + if len(atts) > 0 { + volumeSD.Attachments = atts + } + volumesSD = append(volumesSD, volumeSD) + } + + return volumesSD, nil +} + +// VolumeInspect inspects a single volume. +func (d *driver) VolumeInspect( + ctx types.Context, + volumeID string, + opts *types.VolumeInspectOpts) (*types.Volume, error) { + + resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ + FileSystemId: aws.String(volumeID), + }) + if err != nil { + return nil, err + } + if len(resp.FileSystems) > 0 { + fileSystem := resp.FileSystems[0] + + // Only volumes in "available" state + if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable { + return nil, nil + } + + volume := &types.Volume{ + Name: d.getPrintableName(*fileSystem.Name), + ID: *fileSystem.FileSystemId, + Size: *fileSystem.SizeInBytes.Value, + Attachments: nil, + } + + var atts []*types.VolumeAttachment + + if opts.Attachments { + atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId) + if err != nil { + return nil, err + } + } + if len(atts) > 0 { + volume.Attachments = atts + } + return volume, nil + } + + return nil, types.ErrNotFound{} +} + +// VolumeCreate creates a new volume. 
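+// Each libStorage volume maps to exactly one EFS FileSystem. The file
+// system is created with the requested name as its creation token, tagged
+// with "Name" set to "[tag]/volumeName", and the call blocks until the file
+// system leaves the "creating" life cycle state. A volume type of "maxio"
+// selects the maxIO performance mode; the default is generalPurpose.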
+func (d *driver) VolumeCreate( + ctx types.Context, + name string, + opts *types.VolumeCreateOpts) (*types.Volume, error) { + + request := &awsefs.CreateFileSystemInput{ + CreationToken: aws.String(name), + PerformanceMode: aws.String(awsefs.PerformanceModeGeneralPurpose), + } + if opts.Type != nil && strings.ToLower(*opts.Type) == "maxio" { + request.PerformanceMode = aws.String(awsefs.PerformanceModeMaxIo) + } + fileSystem, err := d.efsClient().CreateFileSystem(request) + + if err != nil { + return nil, err + } + + _, err = d.efsClient().CreateTags(&awsefs.CreateTagsInput{ + FileSystemId: fileSystem.FileSystemId, + Tags: []*awsefs.Tag{ + { + Key: aws.String("Name"), + Value: aws.String(d.getFullVolumeName(name)), + }, + }, + }) + + if err != nil { + return nil, err + } + + // Wait until FS is in "available" state + for { + state, err := d.getFileSystemLifeCycleState(*fileSystem.FileSystemId) + if err == nil { + if state != awsefs.LifeCycleStateCreating { + break + } + log.WithFields(log.Fields{ + "state": state, + "filesystemid": *fileSystem.FileSystemId, + }).Info("EFS not ready") + } else { + log.WithFields(log.Fields{ + "error": err, + "filesystemid": *fileSystem.FileSystemId, + }).Error("failed to retrieve EFS state") + } + // Wait for 2 seconds + <-time.After(2 * time.Second) + } + + return d.VolumeInspect(ctx, *fileSystem.FileSystemId, + &types.VolumeInspectOpts{Attachments: false}) +} + +// VolumeRemove removes a volume. +func (d *driver) VolumeRemove( + ctx types.Context, + volumeID string, + opts types.Store) error { + + // Remove MountTarget(s) + resp, err := d.efsClient().DescribeMountTargets( + &awsefs.DescribeMountTargetsInput{ + FileSystemId: aws.String(volumeID), + }) + if err != nil { + return err + } + + for _, mountTarget := range resp.MountTargets { + _, err = d.efsClient().DeleteMountTarget( + &awsefs.DeleteMountTargetInput{ + MountTargetId: aws.String(*mountTarget.MountTargetId), + }) + + if err != nil { + return err + } + } + + // FileSystem can be deleted only after all mountpoints are deleted ( + // just in "deleting" life cycle state). Here code will wait until all + // mountpoints are deleted. + for { + resp, err := d.efsClient().DescribeMountTargets( + &awsefs.DescribeMountTargetsInput{ + FileSystemId: aws.String(volumeID), + }) + if err != nil { + return err + } + + if len(resp.MountTargets) == 0 { + break + } else { + log.WithFields(log.Fields{ + "mounttargets": resp.MountTargets, + "filesystemid": volumeID, + }).Info("waiting for MountTargets deletion") + } + + <-time.After(2 * time.Second) + } + + // Remove FileSystem + _, err = d.efsClient().DeleteFileSystem( + &awsefs.DeleteFileSystemInput{ + FileSystemId: aws.String(volumeID), + }) + if err != nil { + return err + } + + for { + log.WithFields(log.Fields{ + "filesystemid": volumeID, + }).Info("waiting for FileSystem deletion") + + _, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ + FileSystemId: aws.String(volumeID), + }) + log.Error(err) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "FileSystemNotFound" { + break + } else { + return err + } + } else { + return err + } + } + + <-time.After(2 * time.Second) + } + + return nil +} + +// VolumeAttach attaches a volume and provides a token clients can use +// to validate that device has appeared locally. 
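+// For EFS, attaching means ensuring that a MountTarget exists in the
+// calling instance's subnet. The returned attachment token is empty because
+// the NFS endpoint ("ip:/") is discovered via the executor's LocalDevices
+// rather than by a device name.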
+func (d *driver) VolumeAttach( + ctx types.Context, + volumeID string, + opts *types.VolumeAttachOpts) (*types.Volume, string, error) { + + vol, err := d.VolumeInspect(ctx, volumeID, + &types.VolumeInspectOpts{Attachments: true}) + if err != nil { + return nil, "", err + } + + inst, err := d.InstanceInspect(ctx, nil) + if err != nil { + return nil, "", err + } + + var ma *types.VolumeAttachment + for _, att := range vol.Attachments { + if att.InstanceID.ID == inst.InstanceID.ID { + ma = att + break + } + } + + // No mount targets were found + if ma == nil { + request := &awsefs.CreateMountTargetInput{ + FileSystemId: aws.String(vol.ID), + SubnetId: aws.String(inst.InstanceID.ID), + } + if len(d.securityGroups()) > 0 { + request.SecurityGroups = aws.StringSlice(d.securityGroups()) + } + _, err = d.efsClient().CreateMountTarget(request) + // Failed to create mount target + if err != nil { + return nil, "", err + } + } + + return vol, "", err +} + +// VolumeDetach detaches a volume. +func (d *driver) VolumeDetach( + ctx types.Context, + volumeID string, + opts *types.VolumeDetachOpts) (*types.Volume, error) { + + // TODO(kasisnu): Think about what to do here? + // It is safe to remove the mount target + // when it is no longer being used anywhere + return nil, nil +} + +// VolumeCreateFromSnapshot (not implemented). +func (d *driver) VolumeCreateFromSnapshot( + ctx types.Context, + snapshotID, volumeName string, + opts *types.VolumeCreateOpts) (*types.Volume, error) { + return nil, types.ErrNotImplemented +} + +// VolumeCopy copies an existing volume (not implemented) +func (d *driver) VolumeCopy( + ctx types.Context, + volumeID, volumeName string, + opts types.Store) (*types.Volume, error) { + return nil, types.ErrNotImplemented +} + +// VolumeSnapshot snapshots a volume (not implemented) +func (d *driver) VolumeSnapshot( + ctx types.Context, + volumeID, snapshotName string, + opts types.Store) (*types.Snapshot, error) { + return nil, types.ErrNotImplemented +} + +func (d *driver) Snapshots( + ctx types.Context, + opts types.Store) ([]*types.Snapshot, error) { + return nil, nil +} + +func (d *driver) SnapshotInspect( + ctx types.Context, + snapshotID string, + opts types.Store) (*types.Snapshot, error) { + return nil, nil +} + +func (d *driver) SnapshotCopy( + ctx types.Context, + snapshotID, snapshotName, destinationID string, + opts types.Store) (*types.Snapshot, error) { + return nil, nil +} + +func (d *driver) SnapshotRemove( + ctx types.Context, + snapshotID string, + opts types.Store) error { + + return nil +} + +// Retrieve all filesystems with tags from AWS API. This is very expensive +// operation as it issues AWS SDK call per filesystem to retrieve tags. +func (d *driver) getAllFileSystems() (filesystems []*awsefs.FileSystemDescription, err error) { + resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{}) + if err != nil { + return nil, err + } + filesystems = append(filesystems, resp.FileSystems...) + + for resp.NextMarker != nil { + resp, err = d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ + Marker: resp.NextMarker, + }) + if err != nil { + return nil, err + } + filesystems = append(filesystems, resp.FileSystems...) 
+ } + + return filesystems, nil +} + +func (d *driver) getFileSystemLifeCycleState(fileSystemID string) (string, error) { + resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ + FileSystemId: aws.String(fileSystemID), + }) + if err != nil { + return "", err + } + + if len(resp.FileSystems) > 0 { + fileSystem := resp.FileSystems[0] + return *fileSystem.LifeCycleState, nil + } + + return "", errors.New("FileSystem not found") +} + +func (d *driver) getPrintableName(name string) string { + return strings.TrimPrefix(name, d.tag()+tagDelimiter) +} + +func (d *driver) getFullVolumeName(name string) string { + return d.tag() + tagDelimiter + name +} + +func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) ( + []*types.VolumeAttachment, error) { + + if volumeID == "" { + return nil, errors.New("Missing volume ID") + } + resp, err := d.efsClient().DescribeMountTargets( + &awsefs.DescribeMountTargetsInput{ + FileSystemId: aws.String(volumeID), + }) + if err != nil { + return nil, err + } + + ld, ldOK := context.LocalDevices(ctx) + + var atts []*types.VolumeAttachment + for _, mountTarget := range resp.MountTargets { + var dev string + var status string + if ldOK { + // TODO(kasisnu): Check lifecycle state and build the path better + dev = *mountTarget.IpAddress + ":" + "/" + if _, ok := ld.DeviceMap[dev]; ok { + status = "Exported and Mounted" + } else { + status = "Exported and Unmounted" + } + } else { + status = "Exported" + } + attachmentSD := &types.VolumeAttachment{ + VolumeID: *mountTarget.FileSystemId, + InstanceID: &types.InstanceID{ID: *mountTarget.SubnetId, Driver: d.Name()}, + DeviceName: dev, + Status: status, + } + atts = append(atts, attachmentSD) + } + + return atts, nil +} + +func (d *driver) efsClient() *awsefs.EFS { + config := aws.NewConfig(). + WithCredentials(d.awsCreds). + WithRegion(d.region()) + + if types.Debug { + config = config. + WithLogger(newAwsLogger()). + WithLogLevel(aws.LogDebug) + } + + return awsefs.New(session.New(), config) +} + +func (d *driver) accessKey() string { + return d.config.GetString("efs.accessKey") +} + +func (d *driver) secretKey() string { + return d.config.GetString("efs.secretKey") +} + +func (d *driver) securityGroups() []string { + return strings.Split(d.config.GetString("efs.securityGroups"), ",") +} + +func (d *driver) region() string { + return d.config.GetString("efs.region") +} + +func (d *driver) tag() string { + return d.config.GetString("efs.tag") +} + +// Simple logrus adapter for AWS Logger interface +type awsLogger struct { + logger *log.Logger +} + +func newAwsLogger() *awsLogger { + return &awsLogger{ + logger: log.StandardLogger(), + } +} + +func (l *awsLogger) Log(args ...interface{}) { + l.logger.Println(args...) 
+} diff --git a/drivers/storage/efs/tests/efs_test.go b/drivers/storage/efs/tests/efs_test.go new file mode 100644 index 00000000..7cf86612 --- /dev/null +++ b/drivers/storage/efs/tests/efs_test.go @@ -0,0 +1,301 @@ +package efs + +import ( + "fmt" + "os" + "strconv" + "strings" + "testing" + + log "github.com/Sirupsen/logrus" + "github.com/akutz/gofig" + "github.com/stretchr/testify/assert" + + "github.com/emccode/libstorage/api/context" + "github.com/emccode/libstorage/api/registry" + "github.com/emccode/libstorage/api/server" + "github.com/emccode/libstorage/api/server/executors" + apitests "github.com/emccode/libstorage/api/tests" + "github.com/emccode/libstorage/api/types" + "github.com/emccode/libstorage/api/utils" + + // load the driver + "github.com/emccode/libstorage/drivers/storage/efs" + efsx "github.com/emccode/libstorage/drivers/storage/efs/executor" +) + +var ( + lsxbin string + + lsxLinuxInfo, _ = executors.ExecutorInfoInspect("lsx-linux", false) + lsxDarwinInfo, _ = executors.ExecutorInfoInspect("lsx-darwin", false) + + configYAML = []byte(` +efs: + tag: integrationtest + region: %s + securityGroups: %s +`) +) + +func skipTests() bool { + travis, _ := strconv.ParseBool(os.Getenv("TRAVIS")) + noTest, _ := strconv.ParseBool(os.Getenv("TEST_SKIP_EFS")) + return travis || noTest +} + +var volumeName string +var volumeName2 string + +func init() { + uuid, _ := types.NewUUID() + uuids := strings.Split(uuid.String(), "-") + volumeName = uuids[0] + uuid, _ = types.NewUUID() + uuids = strings.Split(uuid.String(), "-") + volumeName2 = uuids[0] + awsRegion := os.Getenv("AWS_REGION") + awsSecurityGroups := os.Getenv("AWS_EFS_SECURITY_GROUPS") + if awsRegion == "" { + awsRegion = "us-east-1" + } + if awsSecurityGroups == "" { + awsSecurityGroups = "sg-4bf71430" + } + configYAML = []byte(fmt.Sprintf(string(configYAML[:]), awsRegion, awsSecurityGroups)) +} + +func TestMain(m *testing.M) { + server.CloseOnAbort() + ec := m.Run() + os.Exit(ec) +} + +func TestInstanceID(t *testing.T) { + if skipTests() { + t.SkipNow() + } + + sd, err := registry.NewStorageDriver(efs.Name) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + if err := sd.Init(ctx, gofig.New()); err != nil { + t.Fatal(err) + } + + iid, err := efsx.InstanceID() + assert.NoError(t, err) + if err != nil { + t.Error("failed TestInstanceID") + t.FailNow() + } + assert.NotEqual(t, iid, "") + + ctx = ctx.WithValue(context.InstanceIDKey, iid) + i, err := sd.InstanceInspect(ctx, utils.NewStore()) + if err != nil { + t.Fatal(err) + } + + iid = i.InstanceID + + apitests.Run( + t, efs.Name, configYAML, + (&apitests.InstanceIDTest{ + Driver: efs.Name, + Expected: iid, + }).Test) +} + +func TestServices(t *testing.T) { + if skipTests() { + t.SkipNow() + } + + tf := func(config gofig.Config, client types.Client, t *testing.T) { + reply, err := client.API().Services(nil) + assert.NoError(t, err) + assert.Equal(t, len(reply), 1) + + _, ok := reply[efs.Name] + assert.True(t, ok) + } + apitests.Run(t, efs.Name, configYAML, tf) +} + +func volumeCreate( + t *testing.T, client types.Client, volumeName string) *types.Volume { + log.WithField("volumeName", volumeName).Info("creating volume") + size := int64(1) + + opts := map[string]interface{}{ + "priority": 2, + "owner": "root@example.com", + } + + volumeCreateRequest := &types.VolumeCreateRequest{ + Name: volumeName, + Size: &size, + Opts: opts, + } + + reply, err := client.API().VolumeCreate(nil, efs.Name, volumeCreateRequest) + assert.NoError(t, err) + if err != nil { + 
t.FailNow() + t.Error("failed volumeCreate") + } + apitests.LogAsJSON(reply, t) + + assert.Equal(t, volumeName, reply.Name) + return reply +} + +func volumeByName( + t *testing.T, client types.Client, volumeName string) *types.Volume { + + log.WithField("volumeName", volumeName).Info("get volume name") + vols, err := client.API().Volumes(nil, false) + assert.NoError(t, err) + if err != nil { + t.FailNow() + } + assert.Contains(t, vols, efs.Name) + for _, vol := range vols[efs.Name] { + if vol.Name == volumeName { + return vol + } + } + t.FailNow() + t.Error("failed volumeByName") + return nil +} + +func TestVolumeCreateRemove(t *testing.T) { + if skipTests() { + t.SkipNow() + } + + tf := func(config gofig.Config, client types.Client, t *testing.T) { + vol := volumeCreate(t, client, volumeName) + volumeRemove(t, client, vol.ID) + } + apitests.Run(t, efs.Name, configYAML, tf) +} + +func volumeRemove(t *testing.T, client types.Client, volumeID string) { + log.WithField("volumeID", volumeID).Info("removing volume") + err := client.API().VolumeRemove( + nil, efs.Name, volumeID) + assert.NoError(t, err) + if err != nil { + t.Error("failed volumeRemove") + t.FailNow() + } +} + +func TestVolumes(t *testing.T) { + if skipTests() { + t.SkipNow() + } + + tf := func(config gofig.Config, client types.Client, t *testing.T) { + _ = volumeCreate(t, client, volumeName) + _ = volumeCreate(t, client, volumeName2) + + vol1 := volumeByName(t, client, volumeName) + vol2 := volumeByName(t, client, volumeName2) + + volumeRemove(t, client, vol1.ID) + volumeRemove(t, client, vol2.ID) + } + apitests.Run(t, efs.Name, configYAML, tf) +} + +func volumeAttach( + t *testing.T, client types.Client, volumeID string) *types.Volume { + + log.WithField("volumeID", volumeID).Info("attaching volume") + reply, token, err := client.API().VolumeAttach( + nil, efs.Name, volumeID, &types.VolumeAttachRequest{}) + + assert.NoError(t, err) + if err != nil { + t.Error("failed volumeAttach") + t.FailNow() + } + apitests.LogAsJSON(reply, t) + assert.Equal(t, token, "") + + return reply +} + +func volumeInspectAttached( + t *testing.T, client types.Client, volumeID string) *types.Volume { + + log.WithField("volumeID", volumeID).Info("inspecting volume") + reply, err := client.API().VolumeInspect(nil, efs.Name, volumeID, true) + assert.NoError(t, err) + + if err != nil { + t.Error("failed volumeInspectAttached") + t.FailNow() + } + apitests.LogAsJSON(reply, t) + assert.Len(t, reply.Attachments, 1) + // assert.NotEqual(t, "", reply.Attachments[0].DeviceName) + return reply +} + +func volumeInspectDetached( + t *testing.T, client types.Client, volumeID string) *types.Volume { + + log.WithField("volumeID", volumeID).Info("inspecting volume") + reply, err := client.API().VolumeInspect(nil, efs.Name, volumeID, true) + assert.NoError(t, err) + + if err != nil { + t.Error("failed volumeInspectDetached") + t.FailNow() + } + apitests.LogAsJSON(reply, t) + assert.Len(t, reply.Attachments, 0) + apitests.LogAsJSON(reply, t) + return reply +} + +func volumeDetach( + t *testing.T, client types.Client, volumeID string) *types.Volume { + + log.WithField("volumeID", volumeID).Info("detaching volume") + reply, err := client.API().VolumeDetach( + nil, efs.Name, volumeID, &types.VolumeDetachRequest{}) + assert.NoError(t, err) + if err != nil { + t.Error("failed volumeDetach") + t.FailNow() + } + apitests.LogAsJSON(reply, t) + assert.Len(t, reply.Attachments, 0) + return reply +} + +func TestVolumeAttach(t *testing.T) { + if skipTests() { + t.SkipNow() + } + 
var vol *types.Volume + tf := func(config gofig.Config, client types.Client, t *testing.T) { + vol = volumeCreate(t, client, volumeName) + _ = volumeAttach(t, client, vol.ID) + _ = volumeInspectAttached(t, client, vol.ID) + // Don't test detaching volumes + // _ = volumeDetach(t, client, vol.ID) + // _ = volumeInspectDetached(t, client, vol.ID) + volumeRemove(t, client, vol.ID) + } + apitests.RunGroup(t, efs.Name, configYAML, tf) +} diff --git a/glide.lock b/glide.lock index 68a72df6..724d3e77 100644 --- a/glide.lock +++ b/glide.lock @@ -17,6 +17,8 @@ imports: - virtualboxclient - name: github.com/asaskevich/govalidator version: df81827fdd59d8b4fb93d8910b286ab7a3919520 +- name: github.com/aws/aws-sdk-go + version: caee6e866bf437a6bef0777a3bf141cdd3aa022d - name: github.com/BurntSushi/toml version: f0aeabca5a127c4078abb8c8d64298b147264b55 - name: github.com/cesanta/ucl diff --git a/glide.yaml b/glide.yaml index 5287423d..b4b029ef 100644 --- a/glide.yaml +++ b/glide.yaml @@ -36,6 +36,12 @@ import: - package: github.com/emccode/goisilon ref: f9b53f0aaadb12a26b134830142fc537f492cb13 +### EFS + - package: github.com/aws/aws-sdk-go + version: v1.2.2 + repo: https://github.com/aws/aws-sdk-go + + ################################################################################ ## Build System Tools ## ################################################################################ diff --git a/imports/executors/imports_executor.go b/imports/executors/imports_executor.go index cec122f7..895a4cac 100644 --- a/imports/executors/imports_executor.go +++ b/imports/executors/imports_executor.go @@ -3,6 +3,7 @@ package executors import ( // load the storage executors //_ "github.com/emccode/libstorage/drivers/storage/ec2/executor" + _ "github.com/emccode/libstorage/drivers/storage/efs/executor" //_ "github.com/emccode/libstorage/drivers/storage/gce/executor" _ "github.com/emccode/libstorage/drivers/storage/isilon/executor" //_ "github.com/emccode/libstorage/drivers/storage/openstack/executor" diff --git a/imports/remote/imports_remote.go b/imports/remote/imports_remote.go index 1584bdfa..50c06e86 100644 --- a/imports/remote/imports_remote.go +++ b/imports/remote/imports_remote.go @@ -2,6 +2,7 @@ package remote import ( // import to load + _ "github.com/emccode/libstorage/drivers/storage/efs/storage" _ "github.com/emccode/libstorage/drivers/storage/isilon/storage" _ "github.com/emccode/libstorage/drivers/storage/scaleio/storage" _ "github.com/emccode/libstorage/drivers/storage/vbox/storage"