From 1793ce2f429f3ef93b88506c4e9097fa663b0e76 Mon Sep 17 00:00:00 2001
From: Proud Heng
Date: Thu, 28 Jul 2016 15:47:41 -0700
Subject: [PATCH] Add EBS storage driver

This commit ports volume management functionality for AWS EBS on EC2
instances from REX-Ray 0.3.3, per issue #183.
---
 api/drivers/storage/ebs/ebs_config_compat.go |   73 +
 api/server/router/volume/volume_routes.go    |    1 +
 api/types/types_drivers_storage.go           |    1 +
 api/types/types_http_requests.go             |    1 +
 api/types/types_model.go                     |    6 +
 api/utils/schema/schema_generated.go         |   11 +
 drivers/storage/ebs/ebs.go                   |   37 +
 drivers/storage/ebs/executor/ebs_executor.go |  216 +++
 drivers/storage/ebs/storage/ebs_storage.go   | 1307 +++++++++++++++++
 drivers/storage/ebs/tests/README.md          |    9 +
 drivers/storage/ebs/tests/coverage.mk        |    2 +
 drivers/storage/ebs/tests/ebs_test.go        |  728 +++++++++
 .../libstorage/libstorage_driver_funcs.go    |    1 +
 glide.yaml                                   |    2 +-
 imports/executors/imports_executor.go        |    2 +-
 imports/remote/imports_remote.go             |    1 +
 libstorage.json                              |   11 +
 17 files changed, 2407 insertions(+), 2 deletions(-)
 create mode 100644 api/drivers/storage/ebs/ebs_config_compat.go
 create mode 100644 drivers/storage/ebs/ebs.go
 create mode 100644 drivers/storage/ebs/executor/ebs_executor.go
 create mode 100644 drivers/storage/ebs/storage/ebs_storage.go
 create mode 100644 drivers/storage/ebs/tests/README.md
 create mode 100644 drivers/storage/ebs/tests/coverage.mk
 create mode 100644 drivers/storage/ebs/tests/ebs_test.go

diff --git a/api/drivers/storage/ebs/ebs_config_compat.go b/api/drivers/storage/ebs/ebs_config_compat.go
new file mode 100644
index 00000000..531368f4
--- /dev/null
+++ b/api/drivers/storage/ebs/ebs_config_compat.go
@@ -0,0 +1,73 @@
+package types
+
+import "github.com/akutz/gofig"
+import log "github.com/Sirupsen/logrus"
+
+const (
+	// ConfigEBS is a config key.
+	ConfigEBS = "ebs"
+
+	// ConfigEBSAccessKey is a config key.
+	ConfigEBSAccessKey = ConfigEBS + ".accessKey"
+
+	// ConfigEBSSecretKey is a config key.
+	ConfigEBSSecretKey = ConfigEBS + ".secretKey"
+
+	// ConfigEBSRegion is a config key.
+	ConfigEBSRegion = ConfigEBS + ".region"
+
+	// ConfigEBSEndpoint is a config key.
+	ConfigEBSEndpoint = ConfigEBS + ".endpoint"
+
+	// ConfigEBSMaxRetries is a config key.
+	ConfigEBSMaxRetries = ConfigEBS + ".maxRetries"
+
+	// ConfigEBSTag is a config key.
+	ConfigEBSTag = ConfigEBS + ".tag"
+
+	// ConfigEBSRexrayTag is a config key.
+	ConfigEBSRexrayTag = ConfigEBS + ".rexrayTag"
+
+	// ConfigOldEBS is a config key.
+	ConfigOldEBS = "ec2"
+
+	// ConfigOldEBSAccessKey is a config key.
+	ConfigOldEBSAccessKey = ConfigOldEBS + ".accessKey"
+
+	// ConfigOldEBSSecretKey is a config key.
+	ConfigOldEBSSecretKey = ConfigOldEBS + ".secretKey"
+
+	// ConfigOldEBSRegion is a config key.
+	ConfigOldEBSRegion = ConfigOldEBS + ".region"
+
+	// ConfigOldEBSEndpoint is a config key.
+	ConfigOldEBSEndpoint = ConfigOldEBS + ".endpoint"
+
+	// ConfigOldEBSMaxRetries is a config key.
+	ConfigOldEBSMaxRetries = ConfigOldEBS + ".maxRetries"
+
+	// ConfigOldEBSTag is a config key.
+	ConfigOldEBSTag = ConfigOldEBS + ".tag"
+
+	// ConfigOldEBSRexrayTag is a config key.
+	ConfigOldEBSRexrayTag = ConfigOldEBS + ".rexrayTag"
+)
+
+// BackCompat ensures keys can be used from old configurations.
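+// For each pair below, if the new ebs.* key is unset and the old ec2.* key
+// is set, the old value is copied over, so a legacy configuration such as
+// the following (values hypothetical) keeps working unchanged:
+//
+//   ec2:
+//     accessKey: AKIAIOSFODNN7EXAMPLE
+//     region: us-west-2
+//
+// Explicitly set ebs.* values always take precedence.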
+func BackCompat(config gofig.Config) { + checks := [][]string{ + {ConfigEBSAccessKey, ConfigOldEBSAccessKey}, + {ConfigEBSSecretKey, ConfigOldEBSSecretKey}, + {ConfigEBSRegion, ConfigOldEBSRegion}, + {ConfigEBSEndpoint, ConfigOldEBSEndpoint}, + {ConfigEBSMaxRetries, ConfigOldEBSMaxRetries}, + {ConfigEBSTag, ConfigOldEBSTag}, + {ConfigEBSRexrayTag, ConfigOldEBSRexrayTag}, + } + for _, check := range checks { + if !config.IsSet(check[0]) && config.IsSet(check[1]) { + log.Debug(config.Get(check[1])) + config.Set(check[0], config.Get(check[1])) + } + } +} diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go index 6aa367c4..ebc30ca8 100644 --- a/api/server/router/volume/volume_routes.go +++ b/api/server/router/volume/volume_routes.go @@ -307,6 +307,7 @@ func (r *router) volumeCreate( IOPS: store.GetInt64Ptr("iops"), Size: store.GetInt64Ptr("size"), Type: store.GetStringPtr("type"), + Encrypted: store.GetBoolPtr("encrypted"), Opts: store, }) diff --git a/api/types/types_drivers_storage.go b/api/types/types_drivers_storage.go index e2ed9370..fac487c8 100644 --- a/api/types/types_drivers_storage.go +++ b/api/types/types_drivers_storage.go @@ -24,6 +24,7 @@ type VolumeCreateOpts struct { IOPS *int64 Size *int64 Type *string + Encrypted *bool Opts Store } diff --git a/api/types/types_http_requests.go b/api/types/types_http_requests.go index 5bda09da..471e4a65 100644 --- a/api/types/types_http_requests.go +++ b/api/types/types_http_requests.go @@ -8,6 +8,7 @@ type NewRequestObjFunc func() interface{} type VolumeCreateRequest struct { Name string `json:"name"` AvailabilityZone *string `json:"availabilityZone,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` IOPS *int64 `json:"iops,omitempty"` Size *int64 `json:"size,omitempty"` Type *string `json:"type,omitempty"` diff --git a/api/types/types_model.go b/api/types/types_model.go index 54759933..795474f0 100644 --- a/api/types/types_model.go +++ b/api/types/types_model.go @@ -102,6 +102,9 @@ type Snapshot struct { // The name of the snapshot. Name string `json:"name,omitempty" yaml:",omitempty"` + // A flag indicating whether or not the snapshot is encrypted. + Encrypted bool `json:"encrypted,omitempty" yaml:"encrypted,omitempty"` + // The snapshot's ID. ID string `json:"id" yaml:"id"` @@ -129,6 +132,9 @@ type Volume struct { // The availability zone for which the volume is available. AvailabilityZone string `json:"availabilityZone,omitempty" yaml:"availabilityZone,omitempty"` + // A flag indicating whether or not the volume is encrypted. + Encrypted bool `json:"encrypted,omitempty" yaml:"encrypted,omitempty"` + // The volume IOPs. IOPS int64 `json:"iops,omitempty" yaml:"iops,omitempty"` diff --git a/api/utils/schema/schema_generated.go b/api/utils/schema/schema_generated.go index 0836aa87..41a81969 100644 --- a/api/utils/schema/schema_generated.go +++ b/api/utils/schema/schema_generated.go @@ -35,6 +35,10 @@ const ( "type": "string", "description": "The zone for which the volume is available." }, + "encrypted": { + "type": "boolean", + "description": "A flag indicating whether or not the volume is encrypted." + }, "iops": { "type": "number", "description": "The volume IOPs." @@ -156,6 +160,10 @@ const ( "type": "string", "description": "A description of the snapshot." }, + "encrypted": { + "type": "boolean", + "description": "A flag indicating whether or not the snapshot is encrypted." 
+ }, "startTime": { "type": "number", "description": "The time (epoch) at which the request to create the snapshot was submitted." @@ -419,6 +427,9 @@ const ( "availabilityZone": { "type": "string" }, + "encrypted": { + "type": "boolean" + }, "iops": { "type": "number" }, diff --git a/drivers/storage/ebs/ebs.go b/drivers/storage/ebs/ebs.go new file mode 100644 index 00000000..92935fe3 --- /dev/null +++ b/drivers/storage/ebs/ebs.go @@ -0,0 +1,37 @@ +package ebs + +import ( + "github.com/akutz/gofig" +) + +const ( + // Name is the provider's name. + Name = "ebs" + + // TagDelimiter separates tags from volume or snapshot names + TagDelimiter = "/" + + // DefaultMaxRetries is the max number of times to retry failed operations + DefaultMaxRetries = 10 +) + +func init() { + registerConfig() +} + +func registerConfig() { + r := gofig.NewRegistration("EBS") + r.Key(gofig.String, "", "", "", "ebs.accessKey") + r.Key(gofig.String, "", "", "", "ebs.secretKey") + r.Key(gofig.String, "", "", "", "ebs.region") + r.Key(gofig.String, "", "", "", "ebs.endpoint") + r.Key(gofig.String, "", "", "", "ebs.maxRetries") + r.Key(gofig.String, "", "", "Tag prefix for EBS naming", "ebs.tag") + r.Key(gofig.String, "", "", "", "ec2.accessKey") + r.Key(gofig.String, "", "", "", "ec2.secretKey") + r.Key(gofig.String, "", "", "", "ec2.region") + r.Key(gofig.String, "", "", "", "ec2.endpoint") + r.Key(gofig.String, "", "", "", "ec2.maxRetries") + r.Key(gofig.String, "", "", "Tag prefix for EBS naming", "ec2.tag") + gofig.Register(r) +} diff --git a/drivers/storage/ebs/executor/ebs_executor.go b/drivers/storage/ebs/executor/ebs_executor.go new file mode 100644 index 00000000..136d6abc --- /dev/null +++ b/drivers/storage/ebs/executor/ebs_executor.go @@ -0,0 +1,216 @@ +package executor + +import ( + "bufio" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/akutz/gofig" + "github.com/akutz/goof" + + ebsConfig "github.com/emccode/libstorage/api/drivers/storage/ebs" + "github.com/emccode/libstorage/api/registry" + "github.com/emccode/libstorage/api/types" + "github.com/emccode/libstorage/drivers/storage/ebs" +) + +// driver is the storage executor for the ec2 storage driver. +type driver struct { + config gofig.Config + nextDeviceInfo *types.NextDeviceInfo +} + +func init() { + registry.RegisterStorageExecutor(ebs.Name, newDriver) + // Backwards compatibility for ec2 executor + registry.RegisterStorageExecutor("ec2", newDriver) +} + +func newDriver() types.StorageExecutor { + return &driver{} +} + +func (d *driver) Init(ctx types.Context, config gofig.Config) error { + // Ensure backwards compatibility with ebs and ec2 in config + ebsConfig.BackCompat(config) + + d.config = config + // EBS suggests to use /dev/sd[f-p] for Linux EC2 instances. 
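+	// (With nothing else attached, the first name NextDevice hands out
+	// is therefore typically /dev/xvdf.)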
+ // Also on Linux EC2 instances, although the device path may show up + // as /dev/sd* on the EC2 side, it will appear locally as /dev/xvd* + d.nextDeviceInfo = &types.NextDeviceInfo{ + Prefix: "xvd", + Pattern: "[f-p]", + Ignore: false, + } + + return nil +} + +func (d *driver) Name() string { + return ebs.Name +} + +// InstanceID returns the local instance ID for the test +func InstanceID() (*types.InstanceID, error) { + return newDriver().InstanceID(nil, nil) +} + +// InstanceID returns the instance ID from the current instance from metadata +func (d *driver) InstanceID( + ctx types.Context, + opts types.Store) (*types.InstanceID, error) { + // Retrieve instance ID from metadata + res, err := http.Get("http://169.254.169.254/latest/meta-data/instance-id/") + if err != nil { + return nil, goof.WithError("ec2 instance id lookup failed", err) + } + instanceID, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + return nil, goof.WithError("error reading ec2 instance id", err) + } + + iid := &types.InstanceID{Driver: d.Name()} + if err := iid.MarshalMetadata(string(instanceID)); err != nil { + return nil, goof.WithError("error marshalling instance id", err) + } + + return iid, nil +} + +// NextDevice returns the next available device. +func (d *driver) NextDevice( + ctx types.Context, + opts types.Store) (string, error) { + // All possible device paths on Linux EC2 instances are /dev/xvd[f-p] + letters := []string{ + "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"} + + // Find which letters are used for local devices + localDeviceNames := make(map[string]bool) + + localDevices, err := d.LocalDevices( + ctx, &types.LocalDevicesOpts{Opts: opts}) + if err != nil { + return "", goof.WithError("error getting local devices", err) + } + localDeviceMapping := localDevices.DeviceMap + + for localDevice := range localDeviceMapping { + re, _ := regexp.Compile(`^/dev/` + + d.nextDeviceInfo.Prefix + + `(` + d.nextDeviceInfo.Pattern + `)`) + res := re.FindStringSubmatch(localDevice) + if len(res) > 0 { + localDeviceNames[res[1]] = true + } + } + + // Find which letters are used for ephemeral devices + ephemeralDevices, err := d.getEphemeralDevices() + if err != nil { + return "", goof.WithError("error getting ephemeral devices", err) + } + + for _, ephemeralDevice := range ephemeralDevices { + re, _ := regexp.Compile(`^` + + d.nextDeviceInfo.Prefix + + `(` + d.nextDeviceInfo.Pattern + `)`) + res := re.FindStringSubmatch(ephemeralDevice) + if len(res) > 0 { + localDeviceNames[res[1]] = true + } + } + + // Find next available letter for device path + for _, letter := range letters { + if !localDeviceNames[letter] { + nextDeviceName := "/dev/" + + d.nextDeviceInfo.Prefix + letter + return nextDeviceName, nil + } + } + return "", goof.New("No available device") +} + +// Retrieve device paths currently attached and/or mounted +func (d *driver) LocalDevices( + ctx types.Context, + opts *types.LocalDevicesOpts) (*types.LocalDevices, error) { + // Read from /proc/partitions + localDevices := make(map[string]string) + file := "/proc/partitions" + contentBytes, err := ioutil.ReadFile(file) + if err != nil { + return nil, goof.WithError( + "Error reading /proc/partitions", err) + } + + content := string(contentBytes) + + // Parse device names + var deviceName string + lines := strings.Split(content, "\n") + for _, line := range lines[2:] { + fields := strings.Fields(line) + if len(fields) == 4 { + deviceName = "/dev/" + fields[3] + // Device ID is also device path for EBS, since it + // can 
be obtained both locally and remotely + // (remotely being from the AWS API side) + localDevices[deviceName] = deviceName + } + } + + return &types.LocalDevices{ + Driver: d.Name(), + DeviceMap: localDevices, + }, nil + +} + +// Find ephemeral devices from metadata +func (d *driver) getEphemeralDevices() (deviceNames []string, err error) { + // Get list of all block devices + res, err := http.Get("http://169.254.169.254/latest/meta-data/block-device-mapping/") + if err != nil { + return nil, goof.WithError("ec2 block device mapping lookup failed", err) + } + blockDeviceMappings, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + return nil, goof.WithError("error reading ec2 block device mappings", err) + } + + // Filter list of all block devices for ephemeral devices + re, _ := regexp.Compile(`ephemeral([0-9]|1[0-9]|2[0-3])$`) + + scanner := bufio.NewScanner(strings.NewReader(string(blockDeviceMappings))) + scanner.Split(bufio.ScanWords) + + var input string + for scanner.Scan() { + input = scanner.Text() + if re.MatchString(input) { + // Find device name for ephemeral device + res, err := http.Get("http://169.254.169.254/latest/meta-data/block-device-mapping/" + input) + if err != nil { + return nil, goof.WithError("ec2 block device mapping lookup failed", err) + } + deviceName, err := ioutil.ReadAll(res.Body) + // Compensate for kernel volume mapping i.e. change "/dev/sda" to "/dev/xvda" + deviceNameStr := strings.Replace(string(deviceName), "sd", d.nextDeviceInfo.Prefix, 1) + res.Body.Close() + if err != nil { + return nil, goof.WithError("error reading ec2 block device mappings", err) + } + + deviceNames = append(deviceNames, deviceNameStr) + } + } + + return deviceNames, nil +} diff --git a/drivers/storage/ebs/storage/ebs_storage.go b/drivers/storage/ebs/storage/ebs_storage.go new file mode 100644 index 00000000..8f358218 --- /dev/null +++ b/drivers/storage/ebs/storage/ebs_storage.go @@ -0,0 +1,1307 @@ +package storage + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/akutz/gofig" + "github.com/akutz/goof" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + awsec2 "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/emccode/libstorage/api/context" + ebsConfig "github.com/emccode/libstorage/api/drivers/storage/ebs" + "github.com/emccode/libstorage/api/registry" + "github.com/emccode/libstorage/api/types" + "github.com/emccode/libstorage/drivers/storage/ebs" +) + +const ( + // waitVolumeCreate signifies to wait for volume creation to complete + waitVolumeCreate = "create" + // waitVolumeAttach signifies to wait for volume attachment to complete + waitVolumeAttach = "attach" + // waitVolumeDetach signifies to wait for volume detachment to complete + waitVolumeDetach = "detach" +) + +type driver struct { + config gofig.Config + nextDeviceInfo *types.NextDeviceInfo + instanceDocument *instanceIdentityDocument + ec2Instance *awsec2.EC2 + awsCreds *credentials.Credentials +} + +type instanceIdentityDocument struct { + InstanceID string `json:"instanceId"` + BillingProducts interface{} `json:"billingProducts"` + AccountID string `json:"accountId"` + ImageID string `json:"imageId"` + InstanceType string `json:"instanceType"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + 
PendingTime string `json:"pendingTime"` + Architecture string `json:"architecture"` + Region string `json:"region"` + Version string `json:"version"` + AvailabilityZone string `json:"availabilityZone"` + DevpayproductCodes interface{} `json:"devpayProductCodes"` + PrivateIP string `json:"privateIp"` +} + +func init() { + registry.RegisterStorageDriver(ebs.Name, newDriver) + // Backwards compatibility for ec2 driver + registry.RegisterStorageDriver("ec2", newDriver) +} + +func newDriver() types.StorageDriver { + return &driver{} +} + +func (d *driver) Name() string { + return ebs.Name +} + +// Init initializes the driver. +func (d *driver) Init(context types.Context, config gofig.Config) error { + // Ensure backwards compatibility with ebs and ec2 in config + ebsConfig.BackCompat(config) + + d.config = config + + // Initialize with config content for logging + fields := map[string]interface{}{ + "moduleName": d.Name(), + "accessKey": d.accessKey(), + "region": d.region(), + "endpoint": d.endpoint(), + "tag": d.tag(), + } + + log.WithFields(fields).Debug("starting provider driver") + + // Mask password + if d.secretKey() == "" { + fields["secretKey"] = "" + } else { + fields["secretKey"] = "******" + } + + d.nextDeviceInfo = &types.NextDeviceInfo{ + Prefix: "xvd", + // EBS suggests to use /dev/sd[f-p] for Linux EC2 instances. + // Also on Linux EC2 instances, although the device path may show up + // as /dev/sd* on the EC2 side, it will appear locally as /dev/xvd* + Pattern: "[f-p]", + Ignore: false, + } + + // Prepare input for starting new EC2 client with a session + var err error + d.instanceDocument, err = getInstanceIdentityDocument() + if err != nil { + return goof.WithFieldsE(fields, "error getting instance id doc", err) + } + + region := d.region() + if region == "" { + region = d.instanceDocument.Region + } + + endpoint := d.endpoint() + if endpoint == "" && region != "" { + endpoint = fmt.Sprintf("ec2.%s.amazonaws.com", region) + } + + maxRetries := d.maxRetries() + + mySession := session.New() + + d.awsCreds = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.StaticProvider{Value: credentials.Value{AccessKeyID: d.accessKey(), SecretAccessKey: d.secretKey()}}, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(mySession), + }, + }) + + awsConfig := aws.NewConfig().WithCredentials(d.awsCreds).WithRegion(region).WithEndpoint(endpoint).WithMaxRetries(maxRetries) + + // Start new EC2 client with config info + d.ec2Instance = awsec2.New(mySession, awsConfig) + + log.WithFields(fields).Info("storage driver initialized") + + return nil +} + +// NextDeviceInfo returns the information about the driver's next available +// device workflow. +func (d *driver) NextDeviceInfo( + ctx types.Context) (*types.NextDeviceInfo, error) { + return d.nextDeviceInfo, nil +} + +// Type returns the type of storage the driver provides. +func (d *driver) Type(ctx types.Context) (types.StorageType, error) { + //Example: Block storage + return types.Block, nil +} + +// InstanceInspect returns an instance. 
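+// If the instance ID attached to the request context already carries a
+// non-empty ID, the instance is returned as-is; otherwise the AWS
+// instance ID is decoded from the instance ID's marshalled metadata,
+// i.e. the value the executor read from the EC2 metadata service.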
+func (d *driver) InstanceInspect( + ctx types.Context, + opts types.Store) (*types.Instance, error) { + // get instance ID + iid := context.MustInstanceID(ctx) + + // If no instance ID, return blank instance + if iid.ID != "" { + return &types.Instance{InstanceID: iid}, nil + } + + // Decode metadata from instance ID + var awsInstanceID string + if err := iid.UnmarshalMetadata(&awsInstanceID); err != nil { + return nil, goof.WithError( + "Error unmarshalling instance id metadata", err) + } + instanceID := &types.InstanceID{ID: awsInstanceID, Driver: d.Name()} + + return &types.Instance{InstanceID: instanceID}, nil +} + +// Volumes returns all volumes or a filtered list of volumes. +func (d *driver) Volumes( + ctx types.Context, + opts *types.VolumesOpts) ([]*types.Volume, error) { + // Get all volumes via EC2 API + ec2vols, err := d.getVolume(ctx, "", "") + if err != nil { + return nil, goof.WithError("Error getting volume", err) + } + if len(ec2vols) == 0 { + return nil, goof.New("no volumes returned") + } + // Convert retrieved volumes to libStorage types.Volume + vols, convErr := d.toTypesVolume(ctx, ec2vols, opts.Attachments) + if convErr != nil { + return nil, goof.WithError("Error converting to types.Volume", convErr) + } + return vols, nil +} + +// VolumeInspect inspects a single volume. +func (d *driver) VolumeInspect( + ctx types.Context, + volumeID string, + opts *types.VolumeInspectOpts) (*types.Volume, error) { + // Get volume corresponding to volume ID via EC2 API + ec2vols, err := d.getVolume(ctx, volumeID, "") + if err != nil { + return nil, goof.WithError("Error getting volume", err) + } + if len(ec2vols) == 0 { + return nil, goof.New("no volumes returned") + } + vols, convErr := d.toTypesVolume(ctx, ec2vols, opts.Attachments) + if convErr != nil { + return nil, goof.WithError("Error converting to types.Volume", convErr) + } + + // Because getVolume returns an array + // and we only expect the 1st element to be a match, return 1st element + return vols[0], nil +} + +// VolumeCreate creates a new volume. 
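+// The call fails if a volume with the same (tag-prefixed) name already
+// exists. A minimal client-side request that reaches this path, as the
+// tests below do (size in GiB; the name is hypothetical):
+//
+//   size := int64(1)
+//   vol, err := client.API().VolumeCreate(nil, "ebs",
+//       &types.VolumeCreateRequest{
+//           Name: "data01",
+//           Size: &size,
+//       })
+//
+// If ebs.tag is set, the created volume is named "<tag>/data01".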
+func (d *driver) VolumeCreate(ctx types.Context, volumeName string, + opts *types.VolumeCreateOpts) (*types.Volume, error) { + // Initialize for logging + fields := map[string]interface{}{ + "driverName": d.Name(), + "volumeName": volumeName, + "opts": opts, + } + + log.WithFields(fields).Debug("creating volume") + + // Check if volume with same name exists + ec2vols, err := d.getVolume(ctx, "", volumeName) + if err != nil { + return nil, goof.WithFieldsE(fields, "Error getting volume", err) + } + volumes, convErr := d.toTypesVolume(ctx, ec2vols, false) + if convErr != nil { + return nil, goof.WithFieldsE(fields, "Error converting to types.Volume", convErr) + } + + if len(volumes) > 0 { + return nil, goof.WithFields(fields, "volume name already exists") + } + + volume := &types.Volume{} + + // Pass arguments into libStorage types.Volume + if opts.AvailabilityZone != nil { + volume.AvailabilityZone = *opts.AvailabilityZone + } + if opts.Type != nil { + volume.Type = *opts.Type + } + if opts.Size != nil { + volume.Size = *opts.Size + } + if opts.IOPS != nil { + volume.IOPS = *opts.IOPS + } + if opts.Encrypted != nil { + volume.Encrypted = *opts.Encrypted + } + + // Pass libStorage types.Volume to helper function which calls EC2 API + vol, err := d.createVolume(ctx, volumeName, "", volume) + if err != nil { + return nil, goof.WithFieldsE(fields, "Error creating volume", err) + } + // Return the volume created + return d.VolumeInspect(ctx, *vol.VolumeId, &types.VolumeInspectOpts{ + Attachments: true, + }) +} + +// VolumeCreateFromSnapshot creates a new volume from an existing snapshot. +func (d *driver) VolumeCreateFromSnapshot( + ctx types.Context, + snapshotID, volumeName string, + opts *types.VolumeCreateOpts) (*types.Volume, error) { + // TODO Snapshots are not implemented yet + return nil, types.ErrNotImplemented + /* + // Initialize for logging + fields := map[string]interface{}{ + "driverName": d.Name(), + "snapshotID": snapshotID, + "volumeName": volumeName, + "opts": opts, + } + + log.WithFields(fields).Debug("creating volume from snapshot") + + // Check if volume with same name exists + ec2vols, err := d.getVolume(ctx, "", volumeName) + if err != nil { + return nil, goof.WithFieldsE(fields, "Error getting volume", err) + } + volumes, convErr := d.toTypesVolume(ctx, ec2vols, false) + if convErr != nil { + return nil, goof.WithFieldsE(fields, + "Error converting to types.Volume", convErr) + } + + if len(volumes) > 0 { + return nil, goof.WithFields(fields, "volume name already exists") + } + + volume := &types.Volume{} + + // Pass arguments into libStorage types.Volume + if opts.AvailabilityZone != nil { + volume.AvailabilityZone = *opts.AvailabilityZone + } + if opts.Type != nil { + volume.Type = *opts.Type + } + if opts.Size != nil { + volume.Size = *opts.Size + } + if opts.IOPS != nil { + volume.IOPS = *opts.IOPS + } + if *opts.Encrypted == false { + // Volume must be encrypted if snapshot is encrypted + snapshot, err := d.SnapshotInspect(ctx, snapshotID, nil) + if err != nil { + return &types.Volume{}, goof.WithFieldsE(fields, + "Error getting snapshot", err) + } + volume.Encrypted = snapshot.Encrypted + } else { + volume.Encrypted = *opts.Encrypted + } + + // Pass libStorage types.Volume to helper function which calls EC2 API + vol, err := d.createVolume(ctx, volumeName, snapshotID, volume) + if err != nil { + return &types.Volume{}, goof.WithFieldsE(fields, + "error creating volume", err) + } + // Return the volume created + return d.VolumeInspect(ctx, *vol.VolumeId, 
&types.VolumeInspectOpts{ + Attachments: true, + }) + */ +} + +// VolumeCopy copies an existing volume. +func (d *driver) VolumeCopy( + ctx types.Context, + volumeID, volumeName string, + opts types.Store) (*types.Volume, error) { + // TODO Snapshots are not implemented yet + return nil, types.ErrNotImplemented + /* + // Creates a temp snapshot of an existing volume, + // and creates a volume from that snapshot. + var ( + ec2vols []*awsec2.Volume + err error + snapshot *types.Snapshot + vol *types.Volume + ) + + // Initialize for logging + fields := map[string]interface{}{ + "driverName": d.Name(), + "volumeID": volumeID, + "volumeName": volumeName, + "opts": opts, + } + + log.WithFields(fields).Debug("creating volume from snapshot") + + // Check if volume with same name exists + ec2VolsToCheck, err := d.getVolume(ctx, "", volumeName) + if err != nil { + return nil, goof.WithFieldsE(fields, "Error getting volume", err) + } + volsToCheck, convErr := d.toTypesVolume(ctx, ec2VolsToCheck, false) + if convErr != nil { + return nil, goof.WithFieldsE(fields, "Error converting to types.Volume", + convErr) + } + + if len(volsToCheck) > 0 { + return nil, goof.WithFields(fields, "volume name already exists") + } + + // Get volume to copy using volumeID + ec2vols, err = d.getVolume(ctx, volumeID, "") + if err != nil { + return &types.Volume{}, goof.WithFieldsE(fields, + "error getting volume", err) + } + volumes, convErr2 := d.toTypesVolume(ctx, ec2vols, false) + if convErr2 != nil { + return nil, goof.WithFieldsE(fields, + "Error converting to types.Volume", convErr2) + } + if len(volumes) > 1 { + return &types.Volume{}, + goof.WithFields(fields, "multiple volumes returned") + } else if len(volumes) == 0 { + return &types.Volume{}, goof.WithFields(fields, "no volumes returned") + } + + // Create temporary snapshot + snapshotName := fmt.Sprintf("temp-%s-%d", volumeID, time.Now().UnixNano()) + fields["snapshotName"] = snapshotName + snapshot, err = d.VolumeSnapshot(ctx, volumeID, snapshotName, opts) + if err != nil { + return &types.Volume{}, goof.WithFieldsE(fields, + "error creating temporary snapshot", err) + } + + // Use temporary snapshot to create volume + vol, err = d.VolumeCreateFromSnapshot(ctx, snapshot.ID, + volumeName, &types.VolumeCreateOpts{Encrypted: &snapshot.Encrypted, + Opts: opts}) + if err != nil { + return &types.Volume{}, goof.WithFieldsE(fields, + "error creating volume copy from snapshot", err) + } + + // Remove temporary snapshot created + if err = d.SnapshotRemove(ctx, snapshot.ID, opts); err != nil { + return &types.Volume{}, goof.WithFieldsE(fields, + "error removing temporary snapshot", err) + } + + log.Println("Created volume " + vol.ID + " from volume " + volumeID) + return vol, nil + */ +} + +// VolumeSnapshot snapshots a volume. 
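+// Snapshot support is not wired up yet: the method returns
+// types.ErrNotImplemented, and the ported REX-Ray 0.3.3 implementation
+// is retained in the commented-out block below for a future change.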
+func (d *driver) VolumeSnapshot( + ctx types.Context, + volumeID, snapshotName string, + opts types.Store) (*types.Snapshot, error) { + // TODO Snapshots are not implemented yet + return nil, types.ErrNotImplemented + /* + // Create snapshot with EC2 API call + csInput := &awsec2.CreateSnapshotInput{ + VolumeId: &volumeID, + } + + resp, err := d.ec2Instance.CreateSnapshot(csInput) + if err != nil { + return nil, goof.WithError("Error creating snapshot", err) + } + + // Add tags to EC2 snapshot + if err = d.createTags(*resp.SnapshotId, snapshotName); err != nil { + return &types.Snapshot{}, goof.WithError( + "Error creating tags", err) + } + + log.Println("Waiting for snapshot to complete") + err = d.waitSnapshotComplete(ctx, *resp.SnapshotId) + if err != nil { + return &types.Snapshot{}, goof.WithError( + "Error waiting for snapshot creation", err) + } + + // Check if successful snapshot + snapshot, err := d.SnapshotInspect(ctx, *resp.SnapshotId, nil) + if err != nil { + return &types.Snapshot{}, goof.WithError( + "Error getting snapshot", err) + } + + log.Println("Created Snapshot: " + snapshot.ID) + return snapshot, nil + */ +} + +// VolumeRemove removes a volume. +func (d *driver) VolumeRemove( + ctx types.Context, + volumeID string, + opts types.Store) error { + // Initialize for logging + fields := map[string]interface{}{ + "provider": d.Name(), + "volumeID": volumeID, + } + + //TODO check if volume is attached? if so fail + + // Delete volume via EC2 API call + dvInput := &awsec2.DeleteVolumeInput{ + VolumeId: &volumeID, + } + _, err := d.ec2Instance.DeleteVolume(dvInput) + if err != nil { + return goof.WithFieldsE(fields, "error deleting volume", err) + } + + return nil +} + +// VolumeAttach attaches a volume and provides a token clients can use +// to validate that device has appeared locally. 
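+// The token is the attachment's device name, taken from opts.NextDevice.
+// The flow: look up the volume, refuse if it is already attached unless
+// opts.Force is set (in which case any existing attachment is removed
+// first), issue AttachVolume, then poll until EC2 reports the attachment
+// as attached.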
+func (d *driver) VolumeAttach( + ctx types.Context, + volumeID string, + opts *types.VolumeAttachOpts) (*types.Volume, string, error) { + // review volume with attachments to any host + ec2vols, err := d.getVolume(ctx, volumeID, "") + if err != nil { + return nil, "", goof.WithError("Error getting volume", err) + } + volumes, convErr := d.toTypesVolume(ctx, ec2vols, true) + if convErr != nil { + return nil, "", goof.WithError( + "Error converting to types.Volume", convErr) + } + + // Check if there a volume to attach + if len(volumes) == 0 { + return nil, "", goof.New("no volume found") + } + // Check if volume is already attached + if len(volumes[0].Attachments) > 0 && !opts.Force { + return nil, "", goof.New("volume already attached to a host") + } + // Detach already attached volume if forced + if opts.Force { + if _, err := d.VolumeDetach(ctx, volumeID, nil); err != nil { + return nil, "", goof.WithError("Error detaching volume", err) + } + } + // Retrieve next device name + nextDeviceName := "" + if opts.NextDevice != nil { + nextDeviceName = *opts.NextDevice + } + + // Attach volume via helper function which uses EC2 API call + err = d.attachVolume(ctx, volumeID, volumes[0].Name, nextDeviceName) + if err != nil { + return nil, "", goof.WithFieldsE( + log.Fields{ + "provider": d.Name(), + "volumeID": volumeID}, + "error attaching volume", + err, + ) + } + + // Wait for volume's status to update + if err = d.waitVolumeComplete(ctx, volumeID, waitVolumeAttach); err != nil { + return nil, "", goof.WithError("error waiting for volume attach", err) + } + + // Check if successful attach + attachedVol, err := d.VolumeInspect( + ctx, volumeID, &types.VolumeInspectOpts{ + Attachments: true, + Opts: opts.Opts, + }) + if err != nil { + return nil, "", goof.WithError("error getting volume", err) + } + + // Token is the attachment's device name, which will be matched + // to the executor's device ID + return attachedVol, nextDeviceName, nil +} + +// VolumeDetach detaches a volume. 
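+// The call fails if the volume has no attachments; otherwise DetachVolume
+// is issued (honoring opts.Force) and the driver polls until EC2 reports
+// zero attachments before re-inspecting and returning the volume.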
+func (d *driver) VolumeDetach( + ctx types.Context, + volumeID string, + opts *types.VolumeDetachOpts) (*types.Volume, error) { + // review volume with attachments to any host + ec2vols, err := d.getVolume(ctx, volumeID, "") + if err != nil { + return nil, goof.WithError("Error getting volume", err) + } + volumes, convErr := d.toTypesVolume(ctx, ec2vols, true) + if convErr != nil { + return nil, goof.WithError("Error converting to types.Volume", convErr) + } + + // no volumes to detach + if len(volumes) == 0 { + return nil, goof.New("no volume returned") + } + + // volume has no attachments + if len(volumes[0].Attachments) == 0 { + return nil, goof.New("volume already detached") + } + + dvInput := &awsec2.DetachVolumeInput{ + VolumeId: &volumeID, + Force: &opts.Force, + } + + // Detach volume using EC2 API call + if _, err = d.ec2Instance.DetachVolume(dvInput); err != nil { + return nil, goof.WithFieldsE( + log.Fields{ + "provider": d.Name(), + "volumeID": volumeID}, "error detaching volume", err) + } + + if err = d.waitVolumeComplete(ctx, volumeID, waitVolumeDetach); err != nil { + return nil, goof.WithError("error waiting for volume detach", err) + } + + ctx.Info("detached volume", volumeID) + + // check if successful detach + detachedVol, err := d.VolumeInspect( + ctx, volumeID, &types.VolumeInspectOpts{ + Attachments: true, + Opts: opts.Opts, + }) + if err != nil { + return nil, goof.WithError("error getting volume", err) + } + + return detachedVol, nil +} + +// Snapshots returns all volumes or a filtered list of snapshots. +func (d *driver) Snapshots( + ctx types.Context, + opts types.Store) ([]*types.Snapshot, error) { + // TODO Snapshots are not implemented yet + return nil, types.ErrNotImplemented + /* + // Get all snapshots + ec2snapshots, err := d.getSnapshot(ctx, "", "", "") + if err != nil { + return nil, goof.WithError("error getting snapshot", err) + } + if len(ec2snapshots) == 0 { + return nil, goof.New("no snapshots returned") + } + // Convert to libStorage types.Snapshot + snapshots := d.toTypesSnapshot(ec2snapshots) + return snapshots, nil + */ +} + +// SnapshotInspect inspects a single snapshot. +func (d *driver) SnapshotInspect( + ctx types.Context, + snapshotID string, + opts types.Store) (*types.Snapshot, error) { + // TODO Snapshots are not implemented yet + return nil, types.ErrNotImplemented + /* + // Get snapshot corresponding to snapshot ID + ec2snapshots, err := d.getSnapshot(ctx, "", snapshotID, "") + if err != nil { + return nil, goof.WithError("error getting snapshot", err) + } + if len(ec2snapshots) == 0 { + return nil, goof.New("no snapshots returned") + } + // Convert to libStorage types.Snapshot + snapshots := d.toTypesSnapshot(ec2snapshots) + + // Because getSnapshot returns an array + // and we only expect the 1st element to be a match, return 1st element + return snapshots[0], nil + */ +} + +// SnapshotCopy copies an existing snapshot. 
+func (d *driver) SnapshotCopy( + ctx types.Context, + snapshotID, snapshotName, destinationID string, + opts types.Store) (*types.Snapshot, error) { + // TODO Snapshots are not implemented yet + return nil, types.ErrNotImplemented + /* + // no snapshot id inputted + if snapshotID == "" { + return &types.Snapshot{}, goof.New("Missing snapshotID") + } + + // Get snapshot to copy + origSnapshots, err := d.getSnapshot(ctx, "", snapshotID, "") + if err != nil { + return &types.Snapshot{}, + goof.WithError("Error getting snapshot", err) + } + + if len(origSnapshots) > 1 { + return &types.Snapshot{}, + goof.New("multiple snapshots returned") + } else if len(origSnapshots) == 0 { + return &types.Snapshot{}, goof.New("no snapshots returned") + } + + // Copy snapshot with EC2 API call + snapshotID = *(origSnapshots[0]).SnapshotId + snapshotName = d.getName(origSnapshots[0].Tags) + + options := &awsec2.CopySnapshotInput{ + SourceSnapshotId: &snapshotID, + SourceRegion: &d.instanceDocument.Region, + Description: aws.String(fmt.Sprintf("Copy of %s", snapshotID)), + } + resp := &awsec2.CopySnapshotOutput{} + + resp, err = d.ec2Instance.CopySnapshot(options) + if err != nil { + return nil, goof.WithError("error copying snapshot", err) + } + + // Add tags to copied snapshot + if err = d.createTags(*resp.SnapshotId, snapshotName); err != nil { + return &types.Snapshot{}, goof.WithError( + "Error creating tags", err) + } + + log.WithFields(log.Fields{ + "moduleName": d.Name(), + "driverName": d.Name(), + "snapshotName": snapshotName, + "resp.SnapshotId": *resp.SnapshotId}).Info("waiting for snapshot to complete") + + // Wait for snapshot status to update + err = d.waitSnapshotComplete(ctx, *resp.SnapshotId) + if err != nil { + return &types.Snapshot{}, goof.WithError( + "Error waiting for snapshot creation", err) + } + + // Check if successful snapshot + snapshotCopy, err := d.SnapshotInspect(ctx, *resp.SnapshotId, nil) + if err != nil { + return &types.Snapshot{}, goof.WithError( + "Error getting snapshot copy", err) + } + destinationID = snapshotCopy.ID + + log.Println("Copied Snapshot: " + destinationID) + return snapshotCopy, nil + */ +} + +// SnapshotRemove removes a snapshot. 
+func (d *driver) SnapshotRemove( + ctx types.Context, + snapshotID string, + opts types.Store) error { + // TODO Snapshots are not implemented yet + return types.ErrNotImplemented + /* + // Initialize for logging + fields := map[string]interface{}{ + "provider": d.Name(), + "snapshotID": snapshotID, + } + + // no snapshot ID inputted + if snapshotID == "" { + return goof.New("missing snapshot id") + } + + // Delete snapshot using EC2 API call + dsInput := &awsec2.DeleteSnapshotInput{ + SnapshotId: &snapshotID, + } + _, err := d.ec2Instance.DeleteSnapshot(dsInput) + if err != nil { + return goof.WithFieldsE(fields, "error deleting snapshot", err) + } + + return nil + */ +} + +/////////////////////////////////////////////////////////////////////// +///////// HELPER FUNCTIONS SPECIFIC TO PROVIDER ///////// +/////////////////////////////////////////////////////////////////////// +// getVolume searches for and returns volumes matching criteria +func (d *driver) getVolume( + ctx types.Context, + volumeID, volumeName string) ([]*awsec2.Volume, error) { + // Prepare filters + filters := []*awsec2.Filter{} + if volumeName != "" { + filters = append(filters, &awsec2.Filter{ + Name: aws.String("tag:Name"), Values: []*string{&volumeName}}) + } + + if volumeID != "" { + filters = append(filters, &awsec2.Filter{ + Name: aws.String("volume-id"), Values: []*string{&volumeID}}) + } + + // TODO rexrayTag + /* if d.ec2Tag != "" { + filters = append(filters, &awsec2.Filter{ + Name: aws.String(fmt.Sprintf("tag:%s", d.rexrayTag())), + Values: []*string{&d.ec2Tag}}) + } + */ + // Prepare input + dvInput := &awsec2.DescribeVolumesInput{} + + // Apply filters if arguments are specified + if len(filters) > 0 { + dvInput.Filters = filters + } + + if volumeID != "" { + dvInput.VolumeIds = []*string{&volumeID} + } + + // Retrieve filtered volumes through EC2 API call + resp, err := d.ec2Instance.DescribeVolumes(dvInput) + if err != nil { + return []*awsec2.Volume{}, err + } + + return resp.Volumes, nil +} + +// Converts EC2 API volumes to libStorage types.Volume +func (d *driver) toTypesVolume( + ctx types.Context, + ec2vols []*awsec2.Volume, + attachments bool) ([]*types.Volume, error) { + // Get local devices map from context + ld, ldOK := context.LocalDevices(ctx) + if !ldOK { + return nil, goof.New("Error getting local devices from context") + } + + var volumesSD []*types.Volume + for _, volume := range ec2vols { + var attachmentsSD []*types.VolumeAttachment + // Leave attachment's device name blank if attachments is false + for _, attachment := range volume.Attachments { + deviceName := "" + if attachments { + // Compensate for kernel volume mapping i.e. 
change "/dev/sda" to "/dev/xvda" + deviceName = strings.Replace(*attachment.Device, "sd", d.nextDeviceInfo.Prefix, 1) + // Keep device name if it is found in local devices + if _, ok := ld.DeviceMap[deviceName]; !ok { + deviceName = "" + } + } + attachmentSD := &types.VolumeAttachment{ + VolumeID: *attachment.VolumeId, + InstanceID: &types.InstanceID{ID: *attachment.InstanceId, Driver: d.Name()}, + DeviceName: deviceName, + Status: *attachment.State, + } + attachmentsSD = append(attachmentsSD, attachmentSD) + } + name := d.getName(volume.Tags) + volumeSD := &types.Volume{ + Name: name, + ID: *volume.VolumeId, + AvailabilityZone: *volume.AvailabilityZone, + Encrypted: *volume.Encrypted, + Status: *volume.State, + Type: *volume.VolumeType, + Size: *volume.Size, + Attachments: attachmentsSD, + } + // Some volume types have no IOPS, so we get nil in volume.Iops + if volume.Iops != nil { + volumeSD.IOPS = *volume.Iops + } + volumesSD = append(volumesSD, volumeSD) + } + return volumesSD, nil +} + +// getSnapshot searches for and returns snapshots matching criteria +// TODO Snapshots are not implemented yet +/* +func (d *driver) getSnapshot( + ctx types.Context, + volumeID, snapshotID, snapshotName string) ([]*awsec2.Snapshot, error) { + // Prepare filters + filters := []*awsec2.Filter{} + if snapshotName != "" { + filters = append(filters, &awsec2.Filter{ + Name: aws.String("tag:Name"), Values: []*string{&snapshotName}}) + } + + if volumeID != "" { + filters = append(filters, &awsec2.Filter{ + Name: aws.String("volume-id"), Values: []*string{&volumeID}}) + } + + if snapshotID != "" { + //using SnapshotIds in request is returning stale data + filters = append(filters, &awsec2.Filter{ + Name: aws.String("snapshot-id"), Values: []*string{&snapshotID}}) + } + + // TODO rexrayTag? 
+ // if d.ec2Tag != "" { + // filters = append(filters, &ec2.Filter{ + // Name: aws.String(fmt.Sprintf("tag:%s", rexrayTag)), + // Values: []*string{&d.ec2Tag}}) + //} + + // Prepare input + dsInput := &awsec2.DescribeSnapshotsInput{} + + // Apply filters if arguments are specified + if len(filters) > 0 { + dsInput.Filters = filters + } + + // Retrieve filtered volumes through EC2 API call + resp, err := d.ec2Instance.DescribeSnapshots(dsInput) + if err != nil { + return nil, err + } + + return resp.Snapshots, nil +} + + +// Converts EC2 API snapshots to libStorage types.Snapshot +func (d *driver) toTypesSnapshot( + ec2snapshots []*awsec2.Snapshot) []*types.Snapshot { + var snapshotsInt []*types.Snapshot + for _, snapshot := range ec2snapshots { + name := d.getName(snapshot.Tags) + snapshotSD := &types.Snapshot{ + Name: name, + VolumeID: *snapshot.VolumeId, + ID: *snapshot.SnapshotId, + Encrypted: *snapshot.Encrypted, + VolumeSize: *snapshot.VolumeSize, + StartTime: (*snapshot.StartTime).Unix(), + Description: *snapshot.Description, + Status: *snapshot.State, + } + snapshotsInt = append(snapshotsInt, snapshotSD) + } + + return snapshotsInt +} +*/ + +// Used in VolumeAttach +func (d *driver) attachVolume( + ctx types.Context, volumeID, volumeName, deviceName string) error { + // sanity check # of volumes to attach + vol, err := d.getVolume(ctx, volumeID, volumeName) + if err != nil { + return goof.WithError("Error getting volume", err) + } + + if len(vol) == 0 { + return goof.New("no volume returned") + } + if len(vol) > 1 { + return goof.New("too many volumes returned") + } + + // Attach volume via EC2 API call + avInput := &awsec2.AttachVolumeInput{ + Device: &deviceName, + InstanceId: &d.instanceDocument.InstanceID, + VolumeId: &volumeID, + } + if _, err := d.ec2Instance.AttachVolume(avInput); err != nil { + return err + } + return nil +} + +// Retrieve instance identity document to get metadata for initialization +func getInstanceIdentityDocument() (*instanceIdentityDocument, error) { + // Check connection + conn, err := net.DialTimeout("tcp", "169.254.169.254:80", 50*time.Millisecond) + if err != nil { + return &instanceIdentityDocument{}, fmt.Errorf("Error: %v\n", err) + } + defer conn.Close() + + // Retrieve instance identity document + url := "http://169.254.169.254/latest/dynamic/instance-identity/document" + resp, err := http.Get(url) + if err != nil { + return &instanceIdentityDocument{}, fmt.Errorf("Error: %v\n", err) + } + + defer resp.Body.Close() + + // Read contents + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return &instanceIdentityDocument{}, fmt.Errorf("Error: %v\n", err) + } + + // Parse into instanceIdentityDocument + var document instanceIdentityDocument + err = json.Unmarshal(data, &document) + if err != nil { + return &instanceIdentityDocument{}, fmt.Errorf("Error: %v\n", err) + } + + return &document, nil +} + +// Used in VolumeCreate +func (d *driver) createVolume(ctx types.Context, volumeName, snapshotID string, + vol *types.Volume) (*awsec2.Volume, error) { + var ( + err error + server awsec2.Instance + ) + // Create volume using EC2 API call + if server, err = d.getInstance(); err != nil { + return &awsec2.Volume{}, goof.WithError( + "error creating volume with EC2 API call", err) + } + + // Fill in Availability Zone if needed + d.createVolumeEnsureAvailabilityZone(&vol.AvailabilityZone, &server) + + options := &awsec2.CreateVolumeInput{ + Size: &vol.Size, + AvailabilityZone: &vol.AvailabilityZone, + Encrypted: &vol.Encrypted, + VolumeType: 
&vol.Type, + } + if snapshotID != "" { + options.SnapshotId = &snapshotID + } + + if vol.IOPS > 0 { + options.Iops = &vol.IOPS + } + var resp *awsec2.Volume + + if resp, err = d.ec2Instance.CreateVolume(options); err != nil { + return &awsec2.Volume{}, goof.WithError( + "Error creating volume", err) + } + + // Add tags to created volume + if err = d.createTags(*resp.VolumeId, volumeName); err != nil { + return &awsec2.Volume{}, goof.WithError( + "Error creating tags", err) + } + + // Wait for volume status to change + if err = d.waitVolumeComplete( + ctx, *resp.VolumeId, waitVolumeCreate); err != nil { + return &awsec2.Volume{}, goof.WithError( + "Error waiting for volume creation", err) + } + + return resp, nil +} + +// Make sure Availability Zone is non-empty and valid +func (d *driver) createVolumeEnsureAvailabilityZone( + availabilityZone *string, server *awsec2.Instance) { + if *availabilityZone == "" { + *availabilityZone = *server.Placement.AvailabilityZone + } +} + +// Fill in tags for volume or snapshot +func (d *driver) createTags(id, name string) (err error) { + var ( + ctInput *awsec2.CreateTagsInput + inputName string + ) + initCTInput := func() { + if ctInput != nil { + return + } + ctInput = &awsec2.CreateTagsInput{ + Resources: []*string{&id}, + Tags: []*awsec2.Tag{}, + } + // Append config tag to name + inputName = d.getFullName(d.getPrintableName(name)) + } + + initCTInput() + ctInput.Tags = append( + ctInput.Tags, + &awsec2.Tag{ + Key: aws.String("Name"), + Value: &inputName, + }) + + // TODO rexrayTag + /* if d.ec2Tag != "" { + initCTInput() + ctInput.Tags = append( + ctInput.Tags, + &awsec2.Tag{ + Key: aws.String(d.rexrayTag()), + Value: &d.ec2Tag, + }) + } + */ + _, err = d.ec2Instance.CreateTags(ctInput) + if err != nil { + return goof.WithError("Error creating tags", err) + } + return nil +} + +// Wait for volume action to complete (creation, attachment, detachment) +func (d *driver) waitVolumeComplete( + ctx types.Context, volumeID, action string) error { + // no volume id inputted + if volumeID == "" { + return goof.New("Missing volume ID") + } + +UpdateLoop: + for { + // update volume + volumes, err := d.getVolume(ctx, volumeID, "") + if err != nil { + return goof.WithError("Error getting volume", err) + } + + // check retrieved volume + switch action { + case waitVolumeCreate: + if *volumes[0].State == awsec2.VolumeStateAvailable { + break UpdateLoop + } + case waitVolumeDetach: + if len(volumes[0].Attachments) == 0 { + break UpdateLoop + } + case waitVolumeAttach: + if len(volumes[0].Attachments) == 1 && + *volumes[0].Attachments[0].State == awsec2.VolumeAttachmentStateAttached { + break UpdateLoop + } + } + time.Sleep(1 * time.Second) + } + + return nil +} + +// Wait for snapshot action to complete +// TODO Snapshots are not implemented yet +/* +func (d *driver) waitSnapshotComplete( + ctx types.Context, snapshotID string) error { + if snapshotID == "" { + return goof.New("Missing snapshot ID") + } + + for { + // update snapshot + snapshots, err := d.getSnapshot(ctx, "", snapshotID, "") + if err != nil { + return goof.WithError( + "Error getting snapshot", err) + } + + // check retrieved snapshot + if len(snapshots) == 0 { + return goof.New("No snapshots found") + } + snapshot := snapshots[0] + if *snapshot.State == awsec2.SnapshotStateCompleted { + break + } + if *snapshot.State == awsec2.SnapshotStateError { + return goof.Newf("Snapshot state error: %s", *snapshot.StateMessage) + } + time.Sleep(1 * time.Second) + } + + return nil +} +*/ + +// Retrieve volume 
or snapshot name +func (d *driver) getName(tags []*awsec2.Tag) string { + for _, tag := range tags { + if *tag.Key == "Name" { + return *tag.Value + } + } + return "" +} + +// Retrieve current instance using EC2 API call +func (d *driver) getInstance() (awsec2.Instance, error) { + diInput := &awsec2.DescribeInstancesInput{ + InstanceIds: []*string{&d.instanceDocument.InstanceID}, + } + resp, err := d.ec2Instance.DescribeInstances(diInput) + if err != nil { + return awsec2.Instance{}, goof.WithError( + "error retrieving instance with EC2 API call", err) + } + + return *resp.Reservations[0].Instances[0], nil +} + +// Get volume or snapshot name without config tag +func (d *driver) getPrintableName(name string) string { + return strings.TrimPrefix(name, d.tag()+ebs.TagDelimiter) +} + +// Prefix volume or snapshot name with config tag +func (d *driver) getFullName(name string) string { + if d.tag() != "" { + return d.tag() + ebs.TagDelimiter + name + } + return name +} + +// Retrieve config arguments +func (d *driver) accessKey() string { + if accessKey := d.config.GetString("ebs.accessKey"); accessKey != "" { + return accessKey + } + return d.config.GetString("ec2.accessKey") +} + +func (d *driver) secretKey() string { + if secretKey := d.config.GetString("ebs.secretKey"); secretKey != "" { + return secretKey + } + return d.config.GetString("ec2.secretKey") +} + +func (d *driver) region() string { + if region := d.config.GetString("ebs.region"); region != "" { + return region + } + return d.config.GetString("ec2.region") +} + +func (d *driver) endpoint() string { + if endpoint := d.config.GetString("ebs.endpoint"); endpoint != "" { + return endpoint + } + return d.config.GetString("ec2.endpoint") +} + +func (d *driver) maxRetries() int { + // if maxRetries in config is non-numeric or a negative number, + // set it to the default number of max retries. 
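+	// For example (hypothetical config), retries can be disabled
+	// outright with:
+	//
+	//   ebs:
+	//     maxRetries: 0
+	//
+	// while a missing, non-numeric, or negative value falls back to
+	// ebs.DefaultMaxRetries (10).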
+ if maxRetriesString := d.config.GetString("ebs.maxRetries"); maxRetriesString != "" { + if maxRetriesString == "0" { + return 0 + } else if maxRetries := d.config.GetInt("ebs.maxRetries"); maxRetries > 0 { + return maxRetries + } + } else if maxRetriesString := d.config.GetString( + "ec2.maxRetries"); maxRetriesString != "" { + if maxRetriesString == "0" { + return 0 + } else if maxRetries := d.config.GetInt("ec2.maxRetries"); maxRetries > 0 { + return maxRetries + } + } + return ebs.DefaultMaxRetries +} + +func (d *driver) tag() string { + if tag := d.config.GetString("ebs.tag"); tag != "" { + return tag + } + return d.config.GetString("ec2.tag") +} + +// TODO rexrayTag +/*func (d *driver) rexrayTag() string { + if rexrayTag := d.config.GetString("ebs.rexrayTag"); rexrayTag != "" { + return rexrayTag + } + return d.config.GetString("ec2.rexrayTag") +}*/ diff --git a/drivers/storage/ebs/tests/README.md b/drivers/storage/ebs/tests/README.md new file mode 100644 index 00000000..b902ac85 --- /dev/null +++ b/drivers/storage/ebs/tests/README.md @@ -0,0 +1,9 @@ +#EBS driver testing +The credentials for the EBS driver test may be supplied by creating and filling out a `credentials` file under a directory `~/.aws/`, as specified in the AWS blog post [A New and Standardized Way to Manage Credentials in the AWS SDKs](https://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs): + +Location: `~/.aws/credentials` +``` +[default] +aws_access_key_id=AKIAIOSFODNN7EXAMPLE +aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +``` diff --git a/drivers/storage/ebs/tests/coverage.mk b/drivers/storage/ebs/tests/coverage.mk new file mode 100644 index 00000000..2b75a148 --- /dev/null +++ b/drivers/storage/ebs/tests/coverage.mk @@ -0,0 +1,2 @@ +EBS_COVERPKG := $(ROOT_IMPORT_PATH)/drivers/storage/ebs +TEST_COVERPKG_./drivers/storage/ebs/tests := $(EBS_COVERPKG),$(EBS_COVERPKG)/executor diff --git a/drivers/storage/ebs/tests/ebs_test.go b/drivers/storage/ebs/tests/ebs_test.go new file mode 100644 index 00000000..fa3503bc --- /dev/null +++ b/drivers/storage/ebs/tests/ebs_test.go @@ -0,0 +1,728 @@ +package ebs + +import ( + "os" + "strconv" + "strings" + "testing" + + log "github.com/Sirupsen/logrus" + "github.com/akutz/gofig" + "github.com/stretchr/testify/assert" + + "github.com/emccode/libstorage/api/context" + "github.com/emccode/libstorage/api/registry" + "github.com/emccode/libstorage/api/server" + apitests "github.com/emccode/libstorage/api/tests" + "github.com/emccode/libstorage/api/types" + "github.com/emccode/libstorage/api/utils" + "github.com/emccode/libstorage/drivers/storage/ebs" + ec2x "github.com/emccode/libstorage/drivers/storage/ebs/executor" +) + +// Put contents of sample config.yml here +var ( + configYAMLec2 = []byte(` +ec2: + region: us-west-2 + tag: RR + endpoint: ec2.us-west-2.amazonaws.com`) + configYAMLebs = []byte(` +ebs: + region: us-west-2 + tag: RR + endpoint: ec2.us-west-2.amazonaws.com`) +) + +var volumeName string +var volumeName2 string + +// Check environment vars to see whether or not to run this test +func skipTests() bool { + travis, _ := strconv.ParseBool(os.Getenv("TRAVIS")) + noTestEC2, _ := strconv.ParseBool(os.Getenv("TEST_SKIP_EC2")) + noTestEBS, _ := strconv.ParseBool(os.Getenv("TEST_SKIP_EBS")) + return travis || (noTestEC2 || noTestEBS) +} + +// Set volume names to first part of UUID before the - +func init() { + uuid, _ := types.NewUUID() + uuids := strings.Split(uuid.String(), "-") + 
volumeName = uuids[0]
+	uuid, _ = types.NewUUID()
+	uuids = strings.Split(uuid.String(), "-")
+	volumeName2 = uuids[0]
+}
+
+func TestMain(m *testing.M) {
+	server.CloseOnAbort()
+	ec := m.Run()
+	os.Exit(ec)
+}
+
+///////////////////////////////////////////////////////////////////////
+/////////                    PUBLIC TESTS                     /////////
+///////////////////////////////////////////////////////////////////////
+
+// Test that backwards compatibility between the "ec2" and "ebs" config
+// keys works
+func TestConfig(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+	tfEBS := func(config gofig.Config, client types.Client, t *testing.T) {
+		assert.NotEqual(t, config.GetString("ebs.tag"), "")
+		assert.Equal(t, config.GetString("ec2.tag"), "")
+	}
+	tfEC2 := func(config gofig.Config, client types.Client, t *testing.T) {
+		assert.NotEqual(t, config.GetString("ec2.tag"), "")
+		assert.Equal(t, config.GetString("ebs.tag"), "")
+	}
+	apitests.Run(t, "ec2", configYAMLebs, tfEBS)
+	apitests.Run(t, "ec2", configYAMLec2, tfEC2)
+	apitests.Run(t, ebs.Name, configYAMLebs, tfEBS)
+	apitests.Run(t, ebs.Name, configYAMLec2, tfEC2)
+}
+
+// Check that InstanceID metadata is properly returned by the executor
+// and that InstanceID.ID is filled out by InstanceInspect
+func TestInstanceID(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+
+	// create storage driver
+	sd, err := registry.NewStorageDriver(ebs.Name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// initialize storage driver
+	ctx := context.Background()
+	if err := sd.Init(ctx, gofig.New()); err != nil {
+		t.Fatal(err)
+	}
+
+	// get InstanceID metadata from the executor
+	iid, err := ec2x.InstanceID()
+	assert.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// fill in the InstanceID's ID field with InstanceInspect
+	ctx = ctx.WithValue(context.InstanceIDKey, iid)
+	i, err := sd.InstanceInspect(ctx, utils.NewStore())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	iid = i.InstanceID
+
+	// test the resulting InstanceID
+	apitests.Run(
+		t, ebs.Name, nil,
+		(&apitests.InstanceIDTest{
+			Driver:   ebs.Name,
+			Expected: iid,
+		}).Test)
+}
+
+// Same as TestInstanceID, but using the legacy "ec2" driver name
+func TestInstanceIDEC2(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+
+	// create storage driver
+	sd, err := registry.NewStorageDriver("ec2")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// initialize storage driver
+	ctx := context.Background()
+	if err := sd.Init(ctx, gofig.New()); err != nil {
+		t.Fatal(err)
+	}
+
+	// get InstanceID metadata from the executor
+	iid, err := ec2x.InstanceID()
+	assert.NoError(t, err)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// fill in the InstanceID's ID field with InstanceInspect
+	ctx = ctx.WithValue(context.InstanceIDKey, iid)
+	i, err := sd.InstanceInspect(ctx, utils.NewStore())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	iid = i.InstanceID
+
+	// test the resulting InstanceID
+	apitests.Run(
+		t, "ec2", nil,
+		(&apitests.InstanceIDTest{
+			Driver:   "ec2",
+			Expected: iid,
+		}).Test)
+}
+
+// Test that services are configured and returned properly by the client
+func TestServices(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+
+	tf := func(config gofig.Config, client types.Client, t *testing.T) {
+		reply, err := client.API().Services(nil)
+		assert.NoError(t, err)
+		assert.Equal(t, len(reply), 1)
+
+		_, ok := reply[ebs.Name]
+		assert.True(t, ok)
+	}
+	tf2 := func(config gofig.Config, client types.Client, t *testing.T) {
+		reply, err := client.API().Services(nil)
+		assert.NoError(t, err)
+		assert.Equal(t, len(reply), 1)
+
+		_, ok := reply["ec2"]
+		assert.True(t, ok)
+	}
+	apitests.Run(t, ebs.Name, configYAMLec2, tf)
+	apitests.Run(t, "ec2", configYAMLec2, tf2)
+}
+
+// Test volume attach/detach functionality of the storage driver
+func TestVolumeAttach(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+	var vol *types.Volume
+	tf := func(config gofig.Config, client types.Client, t *testing.T) {
+		vol = volumeCreate(t, client, volumeName, config.GetString("ec2.tag"))
+		_ = volumeAttach(t, client, vol.ID)
+		_ = volumeInspectAttached(t, client, vol.ID)
+		_ = volumeInspectDetachedFail(t, client, vol.ID)
+		_ = volumeDetach(t, client, vol.ID)
+		_ = volumeInspectDetached(t, client, vol.ID)
+		volumeRemove(t, client, vol.ID)
+	}
+	apitests.Run(t, ebs.Name, configYAMLec2, tf)
+}
+
+// Test volume creation and removal via the storage driver
+func TestVolumeCreateRemove(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+
+	tf := func(config gofig.Config, client types.Client, t *testing.T) {
+		vol := volumeCreate(t, client, volumeName, config.GetString("ec2.tag"))
+		volumeRemove(t, client, vol.ID)
+	}
+	apitests.Run(t, ebs.Name, configYAMLec2, tf)
+}
+
+// Test encrypted volume creation and removal via the storage driver
+func TestEncryptedVolumeCreateRemove(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+
+	tf := func(config gofig.Config, client types.Client, t *testing.T) {
+		vol := volumeCreateEncrypted(
+			t, client, volumeName, config.GetString("ec2.tag"))
+		volumeRemove(t, client, vol.ID)
+	}
+	apitests.Run(t, ebs.Name, configYAMLec2, tf)
+}
+
+// Test volume listing and retrieval by name via the storage driver
+func TestVolumes(t *testing.T) {
+	if skipTests() {
+		t.SkipNow()
+	}
+
+	tf := func(config gofig.Config, client types.Client, t *testing.T) {
+		_ = volumeCreate(t, client, volumeName, config.GetString("ec2.tag"))
+		_ = volumeCreate(t, client, volumeName2, config.GetString("ec2.tag"))
+
+		vol1 := volumeByName(t, client, volumeName, config.GetString("ec2.tag"))
+		vol2 := volumeByName(t, client, volumeName2, config.GetString("ec2.tag"))
+
+		volumeRemove(t, client, vol1.ID)
+		volumeRemove(t, client, vol2.ID)
+	}
+	apitests.Run(t, ebs.Name, configYAMLec2, tf)
+}
+
+///////////////////////////////////////////////////////////////////////
+/////////        PRIVATE TESTS FOR VOLUME FUNCTIONALITY       /////////
+///////////////////////////////////////////////////////////////////////
+
+// Test volume creation specifying size and volume name
+func volumeCreate(
+	t *testing.T, client types.Client, volumeName, tag string) *types.Volume {
+	log.WithField("volumeName", volumeName).Info("creating volume")
+
+	// Prepare request for storage driver call to create volume
+	size := int64(1)
+
+	opts := map[string]interface{}{
+		"priority": 2,
+		"owner":    "root@example.com",
+	}
+
+	volumeCreateRequest := &types.VolumeCreateRequest{
+		Name: volumeName,
+		Size: &size,
+		Opts: opts,
+	}
+
+	// Send request and retrieve created libStorage types.Volume
+	reply, err := client.API().VolumeCreate(nil, ebs.Name, volumeCreateRequest)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeCreate")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+
+	// If tag is set, add it to the expected volume name
+	if tag != "" {
+		volumeName = tag + ebs.TagDelimiter + volumeName
+	}
+
+	// Check that the created volume's name and size are as requested
+	assert.Equal(t, volumeName, reply.Name)
+	assert.Equal(t, size, reply.Size)
+	return reply
+}
+
+// Test volume creation specifying size, volume name, and encryption
+func volumeCreateEncrypted(
+	t *testing.T, client types.Client, volumeName, tag string) *types.Volume {
+	log.WithField("volumeName", volumeName).Info("creating encrypted volume")
+
+	// Prepare request for storage driver call to create volume
+	size := int64(2)
+	encrypted := true
+
+	opts := map[string]interface{}{
+		"priority": 2,
+		"owner":    "root@example.com",
+	}
+
+	volumeCreateRequest := &types.VolumeCreateRequest{
+		Name:      volumeName,
+		Size:      &size,
+		Encrypted: &encrypted,
+		Opts:      opts,
+	}
+
+	// Send request and retrieve created libStorage types.Volume
+	reply, err := client.API().VolumeCreate(nil, ebs.Name, volumeCreateRequest)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeCreateEncrypted")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+
+	// If tag is set, add it to the expected volume name
+	if tag != "" {
+		volumeName = tag + ebs.TagDelimiter + volumeName
+	}
+
+	// Check that the name and size are as requested, and that the volume
+	// is encrypted
+	assert.Equal(t, volumeName, reply.Name)
+	assert.Equal(t, size, reply.Size)
+	assert.Equal(t, encrypted, reply.Encrypted)
+	return reply
+}
+
+// Test volume retrieval by volume name using Volumes, which retrieves all
+// volumes from the storage driver without filtering, and filters the
+// volumes externally
+func volumeByName(
+	t *testing.T, client types.Client, volumeName, tag string) *types.Volume {
+	log.WithField("volumeName", volumeName).Info("get volume by name")
+
+	// Retrieve all volumes
+	vols, err := client.API().Volumes(nil, false)
+	assert.NoError(t, err)
+	if err != nil {
+		t.FailNow()
+	}
+
+	// If tag is set, add it to the expected volume name
+	if tag != "" {
+		volumeName = tag + ebs.TagDelimiter + volumeName
+	}
+
+	// Filter volumes to those under the ebs service,
+	// and find a volume matching the input volume name
+	assert.Contains(t, vols, ebs.Name)
+	for _, vol := range vols[ebs.Name] {
+		if vol.Name == volumeName {
+			return vol
+		}
+	}
+
+	// No matching volume found
+	t.Error("failed volumeByName")
+	t.FailNow()
+	return nil
+}
+
+// Test volume retrieval by volume ID using Volumes, which retrieves all
+// volumes from the storage driver without filtering, and filters the volumes
+// externally. Contrast with volumeInspect, which directly retrieves matching
+// volumes from the storage driver.
+func volumeByID(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("get volume by ID")
+
+	// Retrieve all volumes
+	vols, err := client.API().Volumes(nil, false)
+	assert.NoError(t, err)
+	if err != nil {
+		t.FailNow()
+	}
+
+	// Filter volumes to those under the ebs service,
+	// and find a volume matching the input volume ID
+	assert.Contains(t, vols, ebs.Name)
+	for _, vol := range vols[ebs.Name] {
+		if vol.ID == volumeID {
+			return vol
+		}
+	}
+
+	// No matching volume found
+	t.Error("failed volumeByID")
+	t.FailNow()
+	return nil
+}
+
+// Test volume removal by volume ID
+func volumeRemove(t *testing.T, client types.Client, volumeID string) {
+	log.WithField("volumeID", volumeID).Info("removing volume")
+	err := client.API().VolumeRemove(nil, ebs.Name, volumeID)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeRemove")
+		t.FailNow()
+	}
+}
+
+// Test volume attachment by volume ID
+func volumeAttach(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("attaching volume")
+
+	// Get the next device name from the executor
+	nextDevice, err := client.Executor().NextDevice(
+		context.Background().WithValue(context.ServiceKey, ebs.Name),
+		utils.NewStore())
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("error getting next device name from executor")
+		t.FailNow()
+	}
+
+	reply, token, err := client.API().VolumeAttach(
+		nil, ebs.Name, volumeID, &types.VolumeAttachRequest{
+			NextDeviceName: &nextDevice,
+		})
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeAttach")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	assert.NotEqual(t, token, "")
+
+	return reply
+}
+
+// Test volume retrieval by volume ID using VolumeInspect, which directly
+// retrieves matching volumes from the storage driver. Contrast with
+// volumeByID, which uses Volumes to retrieve all volumes from the storage
+// driver without filtering, and filters the volumes externally.
+func volumeInspect(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("inspecting volume")
+	reply, err := client.API().VolumeInspect(nil, ebs.Name, volumeID, false)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeInspect")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	return reply
+}
+
+// If a volume is attached, its Attachments field should be populated
+func volumeInspectAttached(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("inspecting volume")
+	reply, err := client.API().VolumeInspect(nil, ebs.Name, volumeID, true)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeInspectAttached")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	assert.Len(t, reply.Attachments, 1)
+	return reply
+}
+
+// If a volume is detached, its Attachments field should not be populated
+func volumeInspectDetached(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("inspecting volume")
+	reply, err := client.API().VolumeInspect(nil, ebs.Name, volumeID, true)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeInspectDetached")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	assert.Len(t, reply.Attachments, 0)
+	return reply
+}
+
+// If a volume is attached but VolumeInspect is called with the attachments
+// flag set to false, its Attachments field should still be populated.
+// However, the Attachments' DeviceName field should not be populated.
+func volumeInspectDetachedFail(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("inspecting volume")
+	reply, err := client.API().VolumeInspect(nil, ebs.Name, volumeID, false)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeInspectDetachedFail")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	assert.Len(t, reply.Attachments, 1)
+	return reply
+}
+
+// Test detaching a volume by volume ID
+func volumeDetach(
+	t *testing.T, client types.Client, volumeID string) *types.Volume {
+	log.WithField("volumeID", volumeID).Info("detaching volume")
+	reply, err := client.API().VolumeDetach(
+		nil, ebs.Name, volumeID, &types.VolumeDetachRequest{})
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeDetach")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	assert.Len(t, reply.Attachments, 0)
+	return reply
+}
+
+///////////////////////////////////////////////////////////////////////
+/////////       PRIVATE TESTS FOR SNAPSHOT FUNCTIONALITY      /////////
+///////////////////////////////////////////////////////////////////////
+// TODO Snapshots are not implemented yet
+/*
+// Test retrieving a snapshot by snapshot ID
+func snapshotInspect(
+	t *testing.T, client types.Client, snapshotID string) *types.Snapshot {
+	log.WithField("snapshotID", snapshotID).Info("inspecting snapshot")
+	reply, err := client.API().SnapshotInspect(nil, ebs.Name, snapshotID)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed snapshotInspect")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+	return reply
+}
+
+// Test snapshot retrieval by snapshot name using Snapshots, which retrieves
+// all snapshots from the storage driver without filtering, and filters the
+// snapshots externally
+func snapshotByName(
+	t *testing.T, client types.Client, snapshotName string) *types.Snapshot {
+	log.WithField("snapshotName", snapshotName).Info("get snapshot by name")
+
+	// Retrieve all snapshots
+	snapshots, err := client.API().Snapshots(nil)
+	assert.NoError(t, err)
+	if err != nil {
+		t.FailNow()
+	}
+
+	// Filter snapshots to those under the ebs service,
+	// and find a snapshot matching the input snapshot name
+	assert.Contains(t, snapshots, ebs.Name)
+	for _, snapshot := range snapshots[ebs.Name] {
+		if snapshot.Name == snapshotName {
+			return snapshot
+		}
+	}
+
+	// No matching snapshot found
+	t.Error("failed snapshotByName")
+	t.FailNow()
+	return nil
+}
+
+// Test snapshot creation from an existing volume, specifying the volume ID
+// of the volume to copy and the snapshot name of the snapshot to create
+func volumeSnapshot(
+	t *testing.T, client types.Client,
+	volumeID, snapshotName string) *types.Snapshot {
+	log.WithField("snapshotName", snapshotName).Info("creating snapshot")
+
+	// Prepare request for storage driver call to create snapshot.
+	// Opts are not currently passed through with the request.
+	volumeSnapshotRequest := &types.VolumeSnapshotRequest{
+		SnapshotName: snapshotName,
+		// Opts: opts,
+	}
+
+	// Send request and retrieve created libStorage types.Snapshot
+	reply, err := client.API().VolumeSnapshot(nil, ebs.Name,
+		volumeID, volumeSnapshotRequest)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeSnapshot")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+
+	// Check that the snapshot name and volume ID are as requested
+	assert.Equal(t, snapshotName, reply.Name)
+	assert.Equal(t, volumeID, reply.VolumeID)
+	return reply
+}
+
+// Test copying a snapshot from an existing snapshot
+func snapshotCopy(
+	t *testing.T, client types.Client,
+	snapshotID, snapshotName, destinationID string) *types.Snapshot {
+	log.WithField("snapshotName", snapshotName).Info("copying snapshot")
+
+	// Prepare request for storage driver call to copy snapshot
+	snapshotCopyRequest := &types.SnapshotCopyRequest{
+		SnapshotName: snapshotName,
+		// DestinationID: destinationID,
+		// Opts: opts,
+	}
+
+	// Send request and retrieve created libStorage types.Snapshot
+	reply, err := client.API().SnapshotCopy(nil, ebs.Name,
+		snapshotID, snapshotCopyRequest)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed snapshotCopy")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+
+	// Check that the snapshot name is as requested
+	assert.Equal(t, snapshotName, reply.Name)
+	return reply
+}
+
+// Test snapshot removal by snapshot ID
+func snapshotRemove(t *testing.T, client types.Client, snapshotID string) {
+	log.WithField("snapshotID", snapshotID).Info("removing snapshot")
+	err := client.API().SnapshotRemove(nil, ebs.Name, snapshotID)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed snapshotRemove")
+		t.FailNow()
+	}
+}
+
+// Test volume creation from an existing snapshot
+func volumeCreateFromSnapshot(
+	t *testing.T, client types.Client,
+	snapshotID, volumeName string) *types.Volume {
+	// Prepare request for storage driver call to create volume from snapshot
+	fields := map[string]interface{}{
+		"snapshotID": snapshotID,
+		"volumeName": volumeName,
+	}
+	log.WithFields(fields).Info("creating volume from snapshot")
+	size := int64(8)
+
+	opts := map[string]interface{}{
+		"priority": 2,
+		"owner":    "root@example.com",
+	}
+
+	volumeCreateRequest := &types.VolumeCreateRequest{
+		Name: volumeName,
+		Size: &size,
+		Opts: opts,
+	}
+
+	// Send request and retrieve created libStorage types.Volume
+	reply, err := client.API().VolumeCreateFromSnapshot(nil,
+		ebs.Name, snapshotID, volumeCreateRequest)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeCreateFromSnapshot")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+
+	// Check that the volume name and size are as requested
+	assert.Equal(t, volumeName, reply.Name)
+	assert.Equal(t, size, reply.Size)
+
+	return reply
+}
+
+// Test copying a volume from an existing volume, using the volume ID of the
+// volume to copy and the desired volume name of the resulting volume copy
+func volumeCopy(
+	t *testing.T, client types.Client,
+	volumeID, volumeName string) *types.Volume {
+	// Prepare request for storage driver call to copy volume.
+	// Opts are not currently passed through with the request.
+	fields := map[string]interface{}{
+		"volumeID":   volumeID,
+		"volumeName": volumeName,
+	}
+	log.WithFields(fields).Info("copying volume")
+
+	volumeCopyRequest := &types.VolumeCopyRequest{
+		VolumeName: volumeName,
+		// Opts: opts,
+	}
+
+	// Send request and retrieve created libStorage types.Volume
+	reply, err := client.API().VolumeCopy(nil,
+		ebs.Name, volumeID, volumeCopyRequest)
+	assert.NoError(t, err)
+	if err != nil {
+		t.Error("failed volumeCopy")
+		t.FailNow()
+	}
+	apitests.LogAsJSON(reply, t)
+
+	// Check that the input volume name matches the created volume's name
+	assert.Equal(t, volumeName, reply.Name)
+
+	return reply
+}
+*/
diff --git a/drivers/storage/libstorage/libstorage_driver_funcs.go b/drivers/storage/libstorage/libstorage_driver_funcs.go
index 08290f0d..668b6c30 100644
--- a/drivers/storage/libstorage/libstorage_driver_funcs.go
+++ b/drivers/storage/libstorage/libstorage_driver_funcs.go
@@ -116,6 +116,7 @@ func (d *driver) VolumeCreate(
 	req := &types.VolumeCreateRequest{
 		Name:             name,
 		AvailabilityZone: opts.AvailabilityZone,
+		Encrypted:        opts.Encrypted,
 		IOPS:             opts.IOPS,
 		Size:             opts.Size,
 		Type:             opts.Type,
diff --git a/glide.yaml b/glide.yaml
index 6da95429..a2c388b4 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -38,7 +38,7 @@ import:
 - package: github.com/emccode/goisilon
   version: v1.4.0
 
-### EFS
+### EFS and EBS
 - package: github.com/aws/aws-sdk-go
   version: v1.2.2
   repo: https://github.com/aws/aws-sdk-go
diff --git a/imports/executors/imports_executor.go b/imports/executors/imports_executor.go
index ab1cfd37..b37912d6 100644
--- a/imports/executors/imports_executor.go
+++ b/imports/executors/imports_executor.go
@@ -2,7 +2,7 @@ package executors
 
 import (
 	// load the storage executors
-	//_ "github.com/emccode/libstorage/drivers/storage/ec2/executor"
+	_ "github.com/emccode/libstorage/drivers/storage/ebs/executor"
 	_ "github.com/emccode/libstorage/drivers/storage/efs/executor"
 	//_ "github.com/emccode/libstorage/drivers/storage/gce/executor"
 	_ "github.com/emccode/libstorage/drivers/storage/isilon/executor"
diff --git a/imports/remote/imports_remote.go b/imports/remote/imports_remote.go
index 6e83b0b1..01880865 100644
--- a/imports/remote/imports_remote.go
+++ b/imports/remote/imports_remote.go
@@ -2,6 +2,7 @@ package remote
 
 import (
 	// import to load
+	_ "github.com/emccode/libstorage/drivers/storage/ebs/storage"
 	_ "github.com/emccode/libstorage/drivers/storage/efs/storage"
 	_ "github.com/emccode/libstorage/drivers/storage/isilon/storage"
 	_ "github.com/emccode/libstorage/drivers/storage/rackspace/storage"
diff --git a/libstorage.json b/libstorage.json
index 921b7f77..28bfa274 100644
--- a/libstorage.json
+++ b/libstorage.json
@@ -31,6 +31,10 @@
         "type": "string",
         "description": "The zone for which the volume is available."
       },
+      "encrypted": {
+        "type": "boolean",
+        "description": "A flag indicating whether or not the volume is encrypted."
+      },
       "iops": {
         "type": "number",
         "description": "The volume IOPs."
@@ -152,6 +156,10 @@
         "type": "string",
         "description": "A description of the snapshot."
       },
+      "encrypted": {
+        "type": "boolean",
+        "description": "A flag indicating whether or not the snapshot is encrypted."
+      },
       "startTime": {
         "type": "number",
         "description": "The time (epoch) at which the request to create the snapshot was submitted."
@@ -415,6 +423,9 @@
       "availabilityZone": {
         "type": "string"
       },
+      "encrypted": {
+        "type": "boolean"
+      },
       "iops": {
         "type": "number"
       },