diff --git a/.gitignore b/.gitignore index ba5090c9..b154780d 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,10 @@ -csi-scaleio + +/csi-vxflexos +helm/.ps.out +service/c.out +service/test/ +test/integration/c.linux.out +test/integration/stderr +semver.mk +goscaleio/ +gofsutil/ diff --git a/CSI Driver for Dell EMC VxFlex OS Product Guide.pdf b/CSI Driver for Dell EMC VxFlex OS Product Guide.pdf index af20ef33..9a8f7784 100644 Binary files a/CSI Driver for Dell EMC VxFlex OS Product Guide.pdf and b/CSI Driver for Dell EMC VxFlex OS Product Guide.pdf differ diff --git a/CSI Driver for Dell EMC VxFlex OS Release Notes.pdf b/CSI Driver for Dell EMC VxFlex OS Release Notes.pdf index 9fb968e0..d566724c 100644 Binary files a/CSI Driver for Dell EMC VxFlex OS Release Notes.pdf and b/CSI Driver for Dell EMC VxFlex OS Release Notes.pdf differ diff --git a/Dockerfile b/Dockerfile index 1c861f28..f0bbcd5c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,10 +1,71 @@ -FROM centos:7.6.1810 -RUN yum install -y libaio -RUN yum install -y libuuid -RUN yum install -y numactl -RUN yum install -y xfsprogs -RUN yum install -y e4fsprogs -COPY "csi-vxflexos" . -COPY "csi-vxflexos.sh" . 
-RUN chmod +x csi-vxflexos.sh +# some arguments that must be supplied +ARG GOPROXY +ARG GOVERSION +ARG BASEIMAGE + + +# Stage to build the driver +FROM golang:${GOVERSION} as builder +ARG GOPROXY +RUN mkdir -p /go/src +COPY ./ /go/src/ +WORKDIR /go/src/ +RUN CGO_ENABLED=0 \ + make build + +# Stage to build the driver image +FROM $BASEIMAGE AS driver +# install necessary packages +# alphabetical order for easier maintenance +RUN yum update -y && \ + yum install -y \ + e4fsprogs \ + libaio \ + libuuid \ + numactl \ + xfsprogs && \ + yum clean all ENTRYPOINT ["/csi-vxflexos.sh"] +# copy in the driver +COPY --from=builder /go/src/csi-vxflexos / +COPY "csi-vxflexos.sh" / +RUN chmod +x /csi-vxflexos.sh + +# stage to run gosec +FROM builder as gosec +RUN go get github.com/securego/gosec/cmd/gosec +RUN cd /go/src && \ + gosec ./... + +# Stage to check for critical and high CVE issues via Trivy (https://github.com/aquasecurity/trivy) +# will break image build if CRITICAL issues found +# will print out all HIGH issues found +FROM driver as cvescan +# run trivy and clean up all traces after +RUN curl https://raw.githubusercontent.com/aquasecurity/trivy/master/contrib/install.sh | sh && \ + trivy fs -s CRITICAL --exit-code 1 / && \ + trivy fs -s HIGH / && \ + trivy image --reset && \ + rm ./bin/trivy + +# Stage to run antivirus scans via clamav (https://www.clamav.net/)) +# will break image build if anything found +FROM driver as virusscan +# run trivy and clean up all traces after +RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ + yum install -y clamav clamav-update && \ + freshclam && \ + clamscan -r -i --exclude-dir=/sys / && \ + yum erase -y clamav clamav-update epel-release + +# final stage +# simple stage to use the driver image as the resultant image +FROM driver as final + +LABEL vendor="Dell Inc." 
\ + name="csi-powerflex" \ + summary="CSI Driver for Dell EMC PowerFlex" \ + description="CSI Driver for provisioning persistent storage from Dell EMC PowerFlex" \ + version="1.2.0" \ + license="Apache-2.0" +COPY ./licenses /licenses diff --git a/Makefile b/Makefile index 40edea50..45e7d830 100644 --- a/Makefile +++ b/Makefile @@ -1,37 +1,45 @@ -all: clean build - -# Tag parameters -MAJOR=1 -MINOR=0 -PATCH=0 -NOTES= -TAGMSG="CSI Spec 1.0" - +# default target +all: help + +# include an overrides file, which sets up default values and allows user overrides +include overrides.mk + +# Help target, prints useful information +help: + @echo + @echo "The following targets are commonly used:" + @echo + @echo "build - Builds the code locally" + @echo "check - Runs the suite of code checking tools: lint, format, etc" + @echo "clean - Cleans the local build" + @echo "docker - Builds the code within a golang container and then creates the driver image" + @echo "integration-test - Runs the integration tests. 
Requires access to an array" + @echo "push - Pushes the built container to a target registry" + @echo "unit-test - Runs the unit tests" + @echo + @make -s overrides-help + +# Clean the build clean: rm -f core/core_generated.go + rm -f semver.mk go clean -build: - go generate - CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build - -install: +# Dependencies +dependencies: go generate - GOOS=linux CGO_ENABLED=0 go install + go run core/semver/semver.go -f mk >semver.mk -# Tags the release with the Tag parameters set above -tag: - -git tag -d v$(MAJOR).$(MINOR).$(PATCH)$(NOTES) - git tag -a -m $(TAGMSG) v$(MAJOR).$(MINOR).$(PATCH)$(NOTES) +# Build the driver locally +build: dependencies + CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build # Generates the docker container (but does not push) -docker: - go generate - go run core/semver/semver.go -f mk >semver.mk +docker: dependencies make -f docker.mk docker # Pushes container to the repository -push: docker +push: docker make -f docker.mk push # Windows or Linux; requires no hardware diff --git a/README.md b/README.md index 43c37690..78d83696 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,25 @@ -# CSI Driver for VxFlex OS +# CSI Driver for PowerFlex (Formerly VxFlex OS) + +[![Go Report Card](https://goreportcard.com/badge/github.com/dell/csi-vxflexos)](https://goreportcard.com/report/github.com/dell/csi-vxflexos) +[![License](https://img.shields.io/github/license/dell/csi-vxflexos)](https://github.com/dell/csi-vxflexos/blob/master/LICENSE) +[![Docker](https://img.shields.io/docker/pulls/dellemc/csi-vxflexos.svg?logo=docker)](https://hub.docker.com/r/dellemc/csi-vxflexos) +[![Last Release](https://img.shields.io/github/v/release/dell/csi-vxflexos?label=latest&style=flat-square)](https://github.com/dell/csi-vxflexos/releases) ## Description -CSI Driver for VxFlex OS is a Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) -driver that provides VxFlex OS support. 
It supports CSI specification version 1.1. +CSI Driver for PowerFlex is a Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) +driver that provides PowerFlex support. It supports CSI specification version 1.1. This project may be compiled as a stand-alone binary using Golang that, when run, provides a valid CSI endpoint. This project can also be built as a Golang plug-in in order to extend the functionality of other programs. +## Support +The CSI Driver for Dell EMC PowerFlex image, which is the built driver code, is available on Dockerhub and is officially supported by Dell EMC. + +The source code for CSI Driver for Dell EMC PowerFlex available on Github is unsupported and provided solely under the terms of the license attached to the source code. For clarity, Dell EMC does not provide support for any source code modifications. + +For any CSI driver issues, questions or feedback, join the [Dell EMC Container community](https://www.dell.com/community/Containers/bd-p/Containers). + ## Building This project is a Go module (see golang.org Module information for explanation). @@ -20,24 +32,20 @@ To run unit tests, execute `make unit-test`. To build a docker image, execute `make docker`. You can run an integration test on a Linux system by populating the file `env.sh` -with values for your VxFlex OS system and then run "make integration-test". +with values for your PowerFlex system and then run "make integration-test". ## Runtime Dependencies The Node portion of the driver can be run on any node that is configured as a -VxFlex OS SDC. This means that the `scini` kernel module must be loaded. Also, +PowerFlex SDC. This means that the `scini` kernel module must be loaded. Also, if the `X_CSI_VXFLEXOS_SDCGUID` environment variable is not set, the driver will try to query the SDC GUID by executing the binary `/opt/emc/scaleio/sdc/bin/drv_cfg`. If that binary is not present, the Node Service cannot be run. 
## Installation +Installation in a Kubernetes cluster should be done using the scripts within the `dell-csi-helm-installer` directory. -Installation in Kubernetes should be done using the `install.vxflexos` script -and accompanying Helm chart in the helm directory. For more information, please refer -to the `CSI Driver for VxFlex OS Product Guide` and `CSI Driver for VxFlex OS Release Notes`. -The driver will be started in Kubernetes as a result of executing the installation -script. - +For more information, consult the [README.md](dell-csi-helm-installer/README.md) ## Using driver @@ -121,12 +129,3 @@ This means that giving a workload read-only access to a block device is not supported. In general, volumes should be formatted with xfs or ext4. - -## Support -The CSI Driver for Dell EMC VxFlex OS image available on Dockerhub is officially supported by Dell EMC. - -The source code available on Github is unsupported and provided solely under the terms of the license attached to the source code. For clarity, Dell EMC does not provide support for any source code modifications. - -For any CSI driver setup, configuration issues, questions or feedback, join the Dell EMC Container community athttps://www.dell.com/community/Containers/bd-p/Containers - -For any Dell EMC storage issues, please contact Dell support at: https://www.dell.com/support. diff --git a/core/semver/semver.go b/core/semver/semver.go index d4d17f11..5da4cf2b 100644 --- a/core/semver/semver.go +++ b/core/semver/semver.go @@ -56,6 +56,7 @@ func main() { } else if strings.EqualFold("ver", format) { format = "ver" } else { + /* #nosec G304 */ if fileExists(format) { buf, err := ioutil.ReadFile(format) if err != nil { @@ -76,6 +77,7 @@ func main() { os.Exit(1) } w = fout + /* #nosec G307 */ defer fout.Close() } @@ -163,6 +165,7 @@ func main() { } func doExec(cmd string, args ...string) ([]byte, error) { + /* #nosec G204 */ c := exec.Command(cmd, args...) 
c.Stderr = os.Stderr return c.Output() diff --git a/dell-csi-helm-installer/README.md b/dell-csi-helm-installer/README.md new file mode 100644 index 00000000..211717a4 --- /dev/null +++ b/dell-csi-helm-installer/README.md @@ -0,0 +1,157 @@ +# Helm Installer for Dell EMC CSI Storage Providers + +## Description + +This directory provides scripts to install, upgrade, uninstall the CSI drivers, and to verify the Kubernetes environment. +These same scripts are present in all Dell EMC Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) drivers. This includes the drivers for: +* [PowerFlex](https://github.com/dell/csi-vxflexos) +* [PowerMax](https://github.com/dell/csi-powermax) +* [PowerScale](https://github.com/dell/csi-powerscale) +* [PowerStore](https://github.com/dell/csi-powerstore) +* [Unity](https://github.com/dell/csi-unity) + +NOTE: This documentation uses the PowerFlex driver as an example. If working with a different driver, substitute the name as appropriate. + +## Dependencies + +Installing any of the Dell EMC CSI Drivers requires a few utilities to be installed on the system running the installation. + +| Dependency | Usage | +| ------------- | ----- | +| `kubectl` | Kubectl is used to validate that the Kubernetes system meets the requirements of the driver. | +| `helm` | Helm v3 is used as the deployment tool for Charts. See, [Install HELM 3](https://helm.sh/docs/intro/install/) for instructions to install HELM 3. | + + +In order to use these tools, a valid `KUBECONFIG` is required. Ensure that either a valid configuration is in the default location or that the `KUBECONFIG` environment variable points to a valid configuration before using these tools. + +## Capabilities + +This project provides the following capabilities, each one is discussed in detail later in this document. + +* Install a driver. 
When installing a driver, options are provided to specify the target namespace as well as options to control the types of verifications to be performed on the target system. +* Upgrade a driver. Upgrading a driver is an effective way to either deploy a new version of the driver or to modify the parameters used in an initial deployment. +* Uninstall a driver. This removes the driver and any installed storage classes. +* Verify a Kubernetes system for suitability with a driver. These verification steps differ, slightly, from driver to driver but include verifying version compatibility, namespace availability, existence of required secrets, and validating worker node compatibility with driver protocols such as iSCSI, Fibre Channel, NFS, etc + + +Most of these usages require the creation/specification of a values file. These files specify configuration settings that are passed into the driver and configure it for use. To create one of these files, the following steps should be followed: +1. Copy a template file for the driver to a new location, naming this new file is at the user's discretion. The template files are always found within the driver repo at `helm/csi-/values.yaml` +2. Edit the file such that it contains the proper configuration settings for the specific environment. These files are yaml formatted so maintaining the file structure is important. + +For example, to create a values file for the PowerFlex driver the following steps can be executed +``` +# cd to the installation script directory +cd dell-csi-helm-installer + +# copy the template file +cp ../helm/csi-vxflexos/values.yaml ./my-vxflexos-settings.yaml + +# edit the newly created values file +vi my-vxflexos-settings.yaml +``` + +These values files can then be archived for later reference or for usage when upgrading the driver. + + +### Install A Driver + +Installing a driver is performed via the `csi-install.sh` script. 
This script requires a few arguments: the target namespace and the user created values file. By default, this will verify the Kubernetes environment and present a list of warnings and/or errors. Errors must be addressed before installing, warning should be examined for their applicability. For example, in order to install the PowerFlex driver into a namespace called "vxflexos", the following command should be run: +``` +./csi-install.sh --namespace vxflexos --values ./my-vxflexos-settings.yaml +``` + +For usage information: +``` +[dell-csi-helm-installer]# ./csi-install.sh -h +Help for ./csi-install.sh + +Usage: ./csi-install.sh options... +Options: + Required + --namespace[=] Kubernetes namespace containing the CSI driver + --values[=] Values file, which defines configuration values + Optional + --release[=] Name to register with helm, default value will match the driver name + --upgrade Perform an upgrade of the specified driver, default is false + --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root + --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification + --skip-verify-node Skip worker node verification checks + --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes. + -h Help +``` + +### Upgrade A Driver + +Upgrading a driver is very similar to installation. The `csi-install.sh` script is run, with the same required arguments, along with a `--upgrade` argument. For example, to upgrade the previously installed PowerFlex driver, the following command can be supplied: + +``` +./csi-install.sh --namespace vxflexos --values ./my-vxflexos-settings.yaml --upgrade +``` + +For usage information: +``` +[dell-csi-helm-installer]# ./csi-install.sh -h +Help for ./csi-install.sh + +Usage: ./csi-install.sh options... 
+Options: + Required + --namespace[=] Kubernetes namespace containing the CSI driver + --values[=] Values file, which defines configuration values + Optional + --release[=] Name to register with helm, default value will match the driver name + --upgrade Perform an upgrade of the specified driver, default is false + --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root + --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification + --skip-verify-node Skip worker node verification checks + --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes. + -h Help +``` + +### Uninstall A Driver + +To uninstall a driver, the `csi-uninstall.sh` script provides a handy wrapper around the `helm` utility. The only required argument for uninstallation is the namespace name. To uninstall the PowerFlex driver: + +``` +./csi-uninstall.sh --namespace vxflexos +``` + +For usage information: +``` +[dell-csi-helm-installer]# ./csi-uninstall.sh -h +Help for ./csi-uninstall.sh + +Usage: ./csi-uninstall.sh options... +Options: + Required + --namespace[=] Kubernetes namespace to uninstall the CSI driver from + Optional + --release[=] Name to register with helm, default value will match the driver name + -h Help +``` + +### Verify A Kubernetes Environment + +The `verify.sh` script is run, automatically, as part of the installation and upgrade procedures and can also be run by itself. This provides a handy means to validate a Kubernetes system without meaning to actually perform the installation. To verify an environment, run `verify.sh` with the namespace name and values file options. + +``` +./verify.sh --namespace vxflexos --values ./my-vxflexos-settings.yaml +``` + +For usage information: +``` +[dell-csi-helm-installer]# ./verify.sh -h +Help for ./verify.sh + +Usage: ./verify.sh options... 
+Options: + Required + --namespace[=] Kubernetes namespace to install the CSI driver + --values[=] Values file, which defines configuration values + Optional + --skip-verify-node Skip worker node verification checks + --release[=] Name to register with helm, default value will match the driver name + --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root + -h Help Help +``` + diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml new file mode 100644 index 00000000..4aa980cc --- /dev/null +++ b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml @@ -0,0 +1,85 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshotclasses.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .driver + name: Driver + type: string + - JSONPath: .deletionPolicy + description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass + should be deleted when its bound VolumeSnapshot is deleted. + name: DeletionPolicy + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + preserveUnknownFields: false + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. 
A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml new file mode 100644 index 00000000..34c51ad6 --- /dev/null +++ b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml @@ -0,0 +1,233 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshotcontents.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot in bytes + name: RestoreSize + type: integer + - JSONPath: .spec.deletionPolicy + description: Determines whether this VolumeSnapshotContent and its physical snapshot + on the underlying storage system should be deleted when its bound VolumeSnapshot + is deleted. + name: DeletionPolicy + type: string + - JSONPath: .spec.driver + description: Name of the CSI driver used to create the physical snapshot on the + underlying storage system. + name: Driver + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: Name of the VolumeSnapshotClass to which this snapshot belongs. 
+ name: VolumeSnapshotClass + type: string + - JSONPath: .spec.volumeSnapshotRef.name + description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent + object is bound. + name: VolumeSnapshot + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. 
"Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a + pre-existing snapshot on the underlying storage system. This field + is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume + from which a snapshot should be dynamically taken from. This field + is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. 
In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates the creation time is unknown. The + format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command `date +%s%N` returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. 
For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on + the underlying storage system. If not specified, it indicates that + dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml new file mode 100644 index 00000000..483706f1 --- /dev/null +++ b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml @@ -0,0 +1,188 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.5 + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" + creationTimestamp: null + name: volumesnapshots.snapshot.storage.k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .status.readyToUse + description: Indicates if a snapshot is ready to be used to restore a volume. + name: ReadyToUse + type: boolean + - JSONPath: .spec.source.persistentVolumeClaimName + description: Name of the source PVC from where a dynamically taken snapshot will + be created. 
+ name: SourcePVC + type: string + - JSONPath: .spec.source.volumeSnapshotContentName + description: Name of the VolumeSnapshotContent which represents a pre-provisioned + snapshot. + name: SourceSnapshotContent + type: string + - JSONPath: .status.restoreSize + description: Represents the complete size of the snapshot. + name: RestoreSize + type: string + - JSONPath: .spec.volumeSnapshotClassName + description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. + name: SnapshotClass + type: string + - JSONPath: .status.boundVolumeSnapshotContentName + description: The name of the VolumeSnapshotContent to which this VolumeSnapshot + is bound. + name: SnapshotContent + type: string + - JSONPath: .status.creationTime + description: Timestamp when the point-in-time snapshot is taken by the underlying + storage system. + name: CreationTime + type: date + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. This + field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' 
+ properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates that the creation time of the snapshot + is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. 
In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + anyOf: + - type: integer + - type: string + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/dell-csi-helm-installer/common.sh b/dell-csi-helm-installer/common.sh new file mode 100644 index 00000000..f4b8730e --- /dev/null +++ b/dell-csi-helm-installer/common.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# +# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +DRIVERDIR="${SCRIPTDIR}/../helm" + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +DARK_GRAY='\033[1;30m' +NC='\033[0m' # No Color + +function log() { + case $1 in + separator) + echo "------------------------------------------------------" + ;; + error) + echo + log separator + printf "${RED}Error: $2\n" + printf "${RED}Installation cannot continue${NC}\n" + exit 1 + ;; + step) + printf "|\n|- %-65s" "$2" + ;; + small_step) + printf "%-61s" "$2" + ;; + section) + log separator + printf "> %s\n" "$2" + log separator + ;; + smart_step) + if [[ $3 == "small" ]]; then + log small_step "$2" + else + log step "$2" + fi + ;; + arrow) + printf " %s\n %s" "|" "|--> " + ;; + step_success) + printf "${GREEN}Success${NC}\n" + ;; + step_failure) + printf "${RED}Failed${NC}\n" + ;; + step_warning) + printf "${YELLOW}Warning${NC}\n" + ;; + info) + printf "${DARK_GRAY}%s${NC}\n" "$2" + ;; + passed) + printf "${GREEN}Success${NC}\n" + ;; + warnings) + printf "${YELLOW}Warnings:${NC}\n" + ;; + errors) + printf "${RED}Errors:${NC}\n" + ;; + *) + echo -n "Unknown" + ;; + esac +} + +function check_error() { + if [[ $1 -ne 0 ]]; then + log step_failure + else + log step_success + fi +} + +# +# get_drivers will populate an array of drivers found by +# enumerating the directories in drivers/ that contain a helm chart +function get_drivers() { + D="${1}" + TTT=$(pwd) + while read -r line; do + DDD=$(echo $line | awk -F '/' '{print $(NF-1)}') + VALIDDRIVERS+=("$DDD") + done < <(find "${D}" -maxdepth 2 -type f -name Chart.yaml | sort) +} + +# +# get_release will determine the helm release name to use +# If ${RELEASE} is set, use that +# Otherwise, use the driver name minus any "csi-" prefix +# argument 1: Driver name +function get_release_name() { + local D="${1}" + if [ ! 
-z "${RELEASE}" ]; then + echo "${RELEASE}" + return + fi + + local PREFIX="csi-" + R=${D#"$PREFIX"} + echo "${R}" +} diff --git a/dell-csi-helm-installer/csi-install.sh b/dell-csi-helm-installer/csi-install.sh new file mode 100755 index 00000000..5445960c --- /dev/null +++ b/dell-csi-helm-installer/csi-install.sh @@ -0,0 +1,391 @@ +#!/bin/bash +# +# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +DRIVERDIR="${SCRIPTDIR}/../helm" +VERIFYSCRIPT="${SCRIPTDIR}/verify.sh" +SNAPCLASSDIR="${SCRIPTDIR}/beta-snapshot-crd" +PROG="${0}" +NODE_VERIFY=1 +VERIFY=1 +MODE="install" +# version of Snapshot CRD to install. Default is none ("") +INSTALL_CRD="" + +declare -a VALIDDRIVERS + +source "$SCRIPTDIR"/common.sh + + +# +# usage will print command execution help and then exit +function usage() { + echo + echo "Help for $PROG" + echo + echo "Usage: $PROG options..." + echo "Options:" + echo " Required" + echo " --namespace[=] Kubernetes namespace containing the CSI driver" + echo " --values[=] Values file, which defines configuration values" + + echo " Optional" + echo " --release[=] Name to register with helm, default value will match the driver name" + echo " --upgrade Perform an upgrade of the specified driver, default is false" + echo " --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root" + echo " --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification" + echo " --skip-verify-node Skip worker node verification checks" + echo " --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes." 
+ echo " -h Help" + echo + + exit 0 +} + +# warning, with an option for users to continue +function warning() { + log separator + printf "${YELLOW}WARNING:${NC}\n" + for N in "$@"; do + echo $N + done + echo + if [ "${ASSUMEYES}" == "true" ]; then + echo "Continuing as '-Y' argument was supplied" + return + fi + read -n 1 -p "Press 'y' to continue or any other key to exit: " CONT + echo + if [ "${CONT}" != "Y" -a "${CONT}" != "y" ]; then + echo "quitting at user request" + exit 2 + fi +} + + +# print header information +function header() { + log section "Installing CSI Driver: ${DRIVER} on ${kMajorVersion}.${kMinorVersion}" +} + +# +# check_for_driver will see if the driver is already installed within the namespace provided +function check_for_driver() { + log section "Checking to see if CSI Driver is already installed" + NUM=$(helm list --namespace "${NS}" | grep "^${RELEASE}\b" | wc -l) + if [ "${1}" == "install" -a "${NUM}" != "0" ]; then + log error "The CSI Driver is already installed" + fi + if [ "${1}" == "upgrade" -a "${NUM}" == "0" ]; then + log error "The CSI Driver is not installed" + fi +} + +# +# validate_params will validate the parameters passed in +function validate_params() { + # make sure the driver was specified + if [ -z "${DRIVER}" ]; then + echo "No driver specified" + usage + exit 1 + fi + # make sure the driver name is valid + if [[ ! "${VALIDDRIVERS[@]}" =~ "${DRIVER}" ]]; then + echo "Driver: ${DRIVER} is invalid." + echo "Valid options are: ${VALIDDRIVERS[@]}" + usage + exit 1 + fi + # the namespace is required + if [ -z "${NS}" ]; then + echo "No namespace specified" + usage + exit 1 + fi + # values file + if [ -z "${VALUES}" ]; then + echo "No values file was specified" + usage + exit 1 + fi + if [ ! 
-f "${VALUES}" ]; then + echo "Unable to read values file at: ${VALUES}" + usage + exit 1 + fi +} + +# +# install_driver uses helm to install the driver with a given name +function install_driver() { + if [ "${1}" == "upgrade" ]; then + log step "Upgrading Driver" + else + log step "Installing Driver" + fi + + HELMOUTPUT="/tmp/csi-install.$$.out" + helm ${1} --values "${DRIVERDIR}/${DRIVER}/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml" --values "${DRIVERDIR}/${DRIVER}/driver-image.yaml" --values "${VALUES}" --namespace ${NS} "${RELEASE}" "${DRIVERDIR}/${DRIVER}" >"${HELMOUTPUT}" 2>&1 + if [ $? -ne 0 ]; then + cat "${HELMOUTPUT}" + log error "Helm operation failed, output can be found in ${HELMOUTPUT}. The failure should be examined, before proceeding. Additionally, running csi-uninstall.sh may be needed to clean up partial deployments." + fi + log step_success + # wait for the deployment to finish, use the default timeout + waitOnRunning "${NS}" "statefulset ${RELEASE}-controller,daemonset ${RELEASE}-node" + if [ $? -eq 1 ]; then + warning "Timed out waiting for the operation to complete." \ + "This does not indicate a fatal error, pods may take a while to start." \ + "Progress can be checked by running \"kubectl get pods -n ${NS}\"" + fi +} + +# Print a nice summary at the end +function summary() { + log section "Operation complete" +} + +# waitOnRunning +# will wait, for a timeout period, for a number of pods to go into Running state within a namespace +# arguments: +# $1: required: namespace to watch +# $2: required: comma seperated list of deployment type and name pairs +# for example: "statefulset mystatefulset,daemonset mydaemonset" +# $3: optional: timeout value, 300 seconds is the default. +function waitOnRunning() { + if [ -z "${2}" ]; then + echo "No namespace and/or list of deployments was supplied. 
This field is required for waitOnRunning" + return 1 + fi + # namespace + local NS="${1}" + # pods + IFS="," read -r -a PODS <<<"${2}" + # timeout value passed in, or 300 seconds as a default + local TIMEOUT="300" + if [ -n "${3}" ]; then + TIMEOUT="${3}" + fi + + error=0 + for D in "${PODS[@]}"; do + log arrow + log smart_step "Waiting for $D to be ready" "small" + kubectl -n "${NS}" rollout status --timeout=${TIMEOUT}s ${D} >/dev/null 2>&1 + if [ $? -ne 0 ]; then + error=1 + log step_failure + else + log step_success + fi + done + + if [ $error -ne 0 ]; then + return 1 + fi + return 0 +} + +function kubectl_safe() { + eval "kubectl $1" + exitcode=$? + if [[ $exitcode != 0 ]]; then + echo "$2" + exit $exitcode + fi +} + +# +# install_snapshot_crds +# Downloads and installs snapshot CRDs +function install_snapshot_crd() { + if [ "${INSTALL_CRD}" == "" ]; then + return + fi + log step "Checking and installing snapshot crds" + + declare -A SNAPCLASSES=( + ["volumesnapshotclasses"]="snapshot.storage.k8s.io_volumesnapshotclasses.yaml" + ["volumesnapshotcontents"]="snapshot.storage.k8s.io_volumesnapshotcontents.yaml" + ["volumesnapshots"]="snapshot.storage.k8s.io_volumesnapshots.yaml" + ) + + for C in "${!SNAPCLASSES[@]}"; do + F="${SNAPCLASSES[$C]}" + # check if custom resource exists + kubectl_safe "get customresourcedefinitions" "Failed to get crds" | grep "${C}" --quiet + + if [[ $? -ne 0 ]]; then + # make sure CRD exists + if [ ! 
-f "${SNAPCLASSDIR}/${SNAPCLASSES[$C]}" ]; then + echo "Unable to find Snapshot Classes at ${SNAPCLASSDIR}" + exit 1 + fi + # create the custom resource + kubectl_safe "create -f ${SNAPCLASSDIR}/${SNAPCLASSES[$C]}" "Failed to create Volume Snapshot Beta CRD: ${C}" + fi + done + + sleep 10s + log step_success +} + +# +# verify_kubernetes +# will run a driver specific function to verify environmental requirements +function verify_kubernetes() { + EXTRA_OPTS="" + if [ $VERIFY -eq 0 ]; then + echo "Skipping verification at user request" + else + if [ $NODE_VERIFY -eq 0 ]; then + EXTRA_OPTS="$EXTRA_OPTS --skip-verify-node" + fi + if [ "${INSTALL_CRD}" == "yes" ]; then + EXTRA_OPTS="$EXTRA_OPTS --snapshot-crd" + fi + "${VERIFYSCRIPT}" --namespace "${NS}" --release "${RELEASE}" --values "${VALUES}" --node-verify-user "${NODEUSER}" ${EXTRA_OPTS} + VERIFYRC=$? + case $VERIFYRC in + 0) ;; + + 1) + warning "Kubernetes validation failed but installation can continue. " \ + "This may affect driver installation." + ;; + *) + log error "Kubernetes validation failed." 
+ ;; + esac + fi +} + +# +# main +# +VERIFYOPTS="" +ASSUMEYES="false" + +# get the list of valid CSI Drivers, this will be the list of directories in drivers/ that contain helm charts +get_drivers "${DRIVERDIR}" +# if only one driver was found, set the DRIVER to that one +if [ ${#VALIDDRIVERS[@]} -eq 1 ]; then + DRIVER="${VALIDDRIVERS[0]}" +fi + +while getopts ":h-:" optchar; do + case "${optchar}" in + -) + case "${OPTARG}" in + skip-verify) + VERIFY=0 + ;; + skip-verify-node) + NODE_VERIFY=0 + ;; + # SNAPSHOT_CRD + snapshot-crd) + INSTALL_CRD="yes" + ;; + upgrade) + MODE="upgrade" + ;; + # NAMESPACE + namespace) + NS="${!OPTIND}" + if [[ -z ${NS} || ${NS} == "--skip-verify" ]]; then + NS=${DEFAULT_NS} + else + OPTIND=$((OPTIND + 1)) + fi + ;; + namespace=*) + NS=${OPTARG#*=} + if [[ -z ${NS} ]]; then NS=${DEFAULT_NS}; fi + ;; + # RELEASE + release) + RELEASE="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + release=*) + RELEASE=${OPTARG#*=} + ;; + # VALUES + values) + VALUES="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + values=*) + VALUES=${OPTARG#*=} + ;; + # NODEUSER + node-verify-user) + NODEUSER="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + node-verify-user=*) + NODEUSER=${OPTARG#*=} + ;; + *) + echo "Unknown option --${OPTARG}" + echo "For help, run $PROG -h" + exit 1 + ;; + esac + ;; + h) + usage + ;; + *) + echo "Unknown option -${OPTARG}" + echo "For help, run $PROG -h" + exit 1 + ;; + esac +done + +# by default the NAME of the helm release of the driver is the same as the driver name +RELEASE=$(get_release_name "${DRIVER}") +# by default, NODEUSER is root +NODEUSER="${NODEUSER:-root}" + +# make sure kubectl is available +kubectl --help >&/dev/null || { + echo "kubectl required for installation... exiting" + exit 2 +} +# make sure helm is available +helm --help >&/dev/null || { + echo "helm required for installation... exiting" + exit 2 +} + +# Get the kubernetes major and minor version numbers. 
+kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/[^0-9].*//g') +kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/[^0-9].*//g') + +# validate the parameters passed in +validate_params "${MODE}" + +header +check_for_driver "${MODE}" +verify_kubernetes + +if [[ "${INSTALL_CRD}" != "" ]]; then + install_snapshot_crd +fi + + +# all good, keep processing +install_driver "${MODE}" + +summary diff --git a/dell-csi-helm-installer/csi-uninstall.sh b/dell-csi-helm-installer/csi-uninstall.sh new file mode 100755 index 00000000..e3e8e5cf --- /dev/null +++ b/dell-csi-helm-installer/csi-uninstall.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +DRIVERDIR="${SCRIPTDIR}/../helm" +PROG="${0}" + +declare -a VALIDDRIVERS + +source "$SCRIPTDIR"/common.sh + +# +# usage will print command execution help and then exit +function usage() { + echo "Help for $PROG" + echo + echo "Usage: $PROG options..." + echo "Options:" + echo " Required" + echo " --namespace[=] Kubernetes namespace to uninstall the CSI driver from" + + echo " Optional" + echo " --release[=] Name to register with helm, default value will match the driver name" + echo " -h Help" + echo + + exit 0 +} + + + +# +# validate_params will validate the parameters passed in +function validate_params() { + # make sure the driver was specified + if [ -z "${DRIVER}" ]; then + echo "No driver specified" + exit 1 + fi + # make sure the driver name is valid + if [[ ! "${VALIDDRIVERS[@]}" =~ "${DRIVER}" ]]; then + echo "Driver: ${DRIVER} is invalid." 
+ echo "Valid options are: ${VALIDDRIVERS[@]}" + exit 1 + fi + # the namespace is required + if [ -z "${NAMESPACE}" ]; then + echo "No namespace specified" + exit 1 + fi +} + + +# check_for_driver will see if the driver is installed within the namespace provided +function check_for_driver() { + NUM=$(helm list --namespace "${NAMESPACE}" | grep "^${RELEASE}\b" | wc -l) + if [ "${NUM}" == "0" ]; then + echo "The CSI Driver is not installed." + exit 1 + fi +} + +# get the list of valid CSI Drivers, this will be the list of directories in drivers/ that contain helm charts +get_drivers "${DRIVERDIR}" +# if only one driver was found, set the DRIVER to that one +if [ ${#VALIDDRIVERS[@]} -eq 1 ]; then + DRIVER="${VALIDDRIVERS[0]}" +fi + +while getopts ":h-:" optchar; do + case "${optchar}" in + -) + case "${OPTARG}" in + # NAMESPACE + namespace) + NAMESPACE="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + namespace=*) + NAMESPACE=${OPTARG#*=} + ;; + # RELEASE + release) + RELEASE="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + release=*) + RELEASE=${OPTARG#*=} + ;; + *) + echo "Unknown option --${OPTARG}" + echo "For help, run $PROG -h" + exit 1 + ;; + esac + ;; + h) + usage + ;; + *) + echo "Unknown option -${OPTARG}" + echo "For help, run $PROG -h" + exit 1 + ;; + esac +done + +# by default the NAME of the helm release of the driver is the same as the driver name +RELEASE=$(get_release_name "${DRIVER}") + +# validate the parameters passed in +validate_params + +check_for_driver +helm delete -n "${NAMESPACE}" "${RELEASE}" +if [ $? -ne 0 ]; then + echo "Removal of the CSI Driver was unsuccessful" + exit 1 +fi + +echo "Removal of the CSI Driver is in progress." +echo "It may take a few minutes for all pods to terminate." 
+ diff --git a/dell-csi-helm-installer/verify.sh b/dell-csi-helm-installer/verify.sh new file mode 100755 index 00000000..4ec05f84 --- /dev/null +++ b/dell-csi-helm-installer/verify.sh @@ -0,0 +1,605 @@ +#!/bin/bash +# +# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +PROG="${0}" +source "$SCRIPTDIR"/common.sh + +declare -a VALIDDRIVERS + +# verify-csi-powermax method +function verify-csi-powermax() { + verify_k8s_versions "1" "17" "1" "19" + verify_namespace "${NS}" + verify_required_secrets "${RELEASE}-creds" + verify_optional_secrets "${RELEASE}-certs" + verify_optional_secrets "csirevproxy-tls-secret" + verify_alpha_snap_resources + verify_beta_snap_requirements + verify_iscsi_installation + verify_helm_3 +} + +# +# verify-csi-isilon method +function verify-csi-isilon() { + verify_k8s_versions "1" "17" "1" "19" + verify_namespace "${NS}" + verify_required_secrets "${RELEASE}-creds" + verify_optional_secrets "${RELEASE}-certs" + verify_alpha_snap_resources + verify_beta_snap_requirements + verify_helm_3 +} + +# +# verify-csi-vxflexos method +function verify-csi-vxflexos() { + verify_k8s_versions "1" "17" "1" "19" + verify_namespace "${NS}" + verify_required_secrets "${RELEASE}-creds" + verify_sdc_installation + verify_alpha_snap_resources + verify_beta_snap_requirements + verify_helm_3 +} + +# verify-csi-powerstore method +function verify-csi-powerstore() { + verify_k8s_versions "1" "17" "1" "19" + verify_namespace "${NS}" + verify_required_secrets "${RELEASE}-creds" + verify_alpha_snap_resources + verify_beta_snap_requirements + verify_powerstore_node_configuration + verify_helm_3 +} + +# verify-csi-unity method +function 
verify-csi-unity() { + verify_k8s_versions "1" "17" "1" "19" + verify_namespace "${NS}" + verify_required_secrets "${RELEASE}-creds" + verify_required_secrets "${RELEASE}-certs-0" + verify_alpha_snap_resources + verify_beta_snap_requirements + verify_helm_3 +} + +# +# verify-driver will call the proper method to verify a specific driver +function verify-driver() { + if [ -z "${1}" ]; then + echo "Expected one argument, the driver name, to verify-driver. Received none." + exit $EXIT_ERROR + fi + local D="${1}" + # check if a verify-$DRIVER function exists + # if not, error and exit + # if yes, check to see if it should be run and run it + FNTYPE=$(type -t verify-$D) + if [ "$FNTYPE" != "function" ]; then + echo "ERROR: verify-$D function does not exist" + exit $EXIT_ERROR + else + header + log step "Driver: ${D}" + echo + verify-$D + summary + fi +} + +# Print usage information +function usage() { + echo + echo "Help for $PROG" + echo + echo "Usage: $PROG options..." + echo "Options:" + echo " Required" + echo " --namespace[=] Kubernetes namespace to install the CSI driver" + echo " --values[=] Values file, which defines configuration values" + + echo " Optional" + echo " --skip-verify-node Skip worker node verification checks" + echo " --release[=] Name to register with helm, default value will match the driver name" + echo " --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root" + echo " --snapshot-crd Signifies that the Snapshot CRDs will be installed as part of installation." 
+ echo " -h Help" + echo + + exit $EXIT_WARNING +} + +# print header information +function header() { + log section "Verifying Kubernetes and driver configuration" + echo "|- Kubernetes Version: ${kMajorVersion}.${kMinorVersion}" +} + +# Check if the SDC is installed and the kernel module loaded +function verify_sdc_installation() { + if [ ${NODE_VERIFY} -eq 0 ]; then + return + fi + log step "Verifying the SDC installation" + + error=0 + missing=() + for node in $MINION_NODES; do + # check if the scini kernel module is loaded + ssh ${NODEUSER}@$node "/sbin/lsmod | grep scini" >/dev/null 2>&1 + rv=$? + if [ $rv -ne 0 ]; then + missing+=($node) + error=1 + found_warning "SDC was not found on node: $node" + fi + done + check_error error +} + +function verify_powerstore_node_configuration() { + if [ ${NODE_VERIFY} -eq 0 ]; then + return + fi + + log step "Verifying PowerStore node configuration" + echo + + if ls "${VALUES}" >/dev/null; then + if grep -c "scsiProtocol:[[:blank:]]\+FC" "${VALUES}" >/dev/null; then + log arrow + verify_fc_installation + elif grep -c "scsiProtocol:[[:blank:]]\+ISCSI" "${VALUES}" >/dev/null; then + log arrow + verify_iscsi_installation "small" + elif grep -c "scsiProtocol:[[:blank:]]\+auto" "${VALUES}" >/dev/null; then + log arrow + verify_iscsi_installation "small" + log arrow + verify_fc_installation "small" + elif grep -c "scsiProtocol:[[:blank:]]\+None" "${VALUES}" >/dev/null; then + log step_warning + found_warning "Neither FC nor iSCSI connection is activated, please be sure that NFS settings are correct" + else + log step_failure + found_error "Incorrect scsiProtocol value, must be 'FC', 'ISCSI', 'auto' or 'None'" + fi + else + log step_failure + found_error "${VALUES} doesn't exist" + fi +} + +# Check if the iSCSI client is installed +function verify_iscsi_installation() { + if [ ${NODE_VERIFY} -eq 0 ]; then + return + fi + + log smart_step "Verifying iSCSI installation" "$1" + + error=0 + for node in $MINION_NODES; do + # check 
if the iSCSI client is installed + ssh ${NODEUSER}@"${node}" "cat /etc/iscsi/initiatorname.iscsi" >/dev/null 2>&1 + rv=$? + if [ $rv -ne 0 ]; then + error=1 + found_warning "iSCSI client was not found on node: $node" + fi + ssh ${NODEUSER}@"${node}" pgrep iscsid &>/dev/null + rv=$? + if [ $rv -ne 0 ]; then + error=1 + found_warning "iscsid is not running on node: $node" + fi + done + + check_error error +} + +# Check if the fc is installed +function verify_fc_installation() { + if [ ${NODE_VERIFY} -eq 0 ]; then + return + fi + + log smart_step "Verifying FC installation" "$1" + + error=0 + for node in $MINION_NODES; do + # check if FC hosts are available + ssh ${NODEUSER}@${node} 'ls --hide=* /sys/class/fc_host/* 1>/dev/null' &>/dev/null + rv=$? + if [[ ${rv} -ne 0 ]]; then + error=1 + found_warning "can't find any FC hosts on node: $node" + fi + done + + check_error error +} + +# verify secrets exist +function verify_required_secrets() { + log step "Verifying that required secrets have been created" + + error=0 + for N in "${@}"; do + # Make sure the secret has already been established + kubectl get secrets -n "${NS}" 2>/dev/null | grep "${N}" --quiet + if [ $? -ne 0 ]; then + error=1 + found_error "Required secret, ${N}, does not exist." + fi + done + check_error error +} + +function verify_optional_secrets() { + log step "Verifying that optional secrets have been created" + + error=0 + for N in "${@}"; do + # Make sure the secret has already been established + kubectl get secrets -n "${NS}" 2>/dev/null | grep "${N}" --quiet + if [ $? -ne 0 ]; then + error=1 + found_warning "Optional secret, ${N}, does not exist." 
+ fi + done + check_error error +} + +# verify minimum and maximum k8s versions +function verify_k8s_versions() { + log step "Verifying Kubernetes versions" + echo + log arrow + verify_min_k8s_version "$1" "$2" "small" + log arrow + verify_max_k8s_version "$3" "$4" "small" +} + +# verify minimum k8s version +function verify_min_k8s_version() { + log smart_step "Verifying minimum Kubernetes version" "$3" + + error=0 + if [[ "${1}" -gt "${kMajorVersion}" ]]; then + error=1 + found_error "Kubernetes version, ${kMajorVersion}.${kMinorVersion}, is too old. Minimum required version is: ${1}.${2}" + fi + if [[ "${2}" -gt "${kMinorVersion}" ]]; then + error=1 + found_error "Kubernetes version, ${kMajorVersion}.${kMinorVersion}, is too old. Minimum required version is: ${1}.${2}" + fi + + check_error error +} + +# verify maximum k8s version +function verify_max_k8s_version() { + log smart_step "Verifying maximum Kubernetes version" "$3" + + error=0 + if [[ "${1}" -lt "${kMajorVersion}" ]]; then + error=1 + found_warning "Kubernetes version, ${kMajorVersion}.${kMinorVersion}, is newer than has been tested. Last tested version is: ${1}.${2}" + fi + if [[ "${2}" -lt "${kMinorVersion}" ]]; then + error=1 + found_warning "Kubernetes version, ${kMajorVersion}.${kMinorVersion}, is newer than has been tested. Last tested version is: ${1}.${2}" + fi + + check_error error +} + +# verify namespace +function verify_namespace() { + log step "Verifying that required namespaces have been created" + + error=0 + for N in "${@}"; do + # Make sure the namespace exists + kubectl describe namespace "${N}" >/dev/null 2>&1 + if [ $? 
-ne 0 ]; then + error=1 + found_error "Namespace does not exist: ${N}" + fi + done + + check_error error +} + +# verify that the no alpha version of volume snapshot resource is present on the system +function verify_alpha_snap_resources() { + log step "Verifying alpha snapshot resources" + echo + log arrow + log smart_step "Verifying that alpha snapshot CRDs are not installed" "small" + + error=0 + # check for the alpha snapshot CRDs. These shouldn't be present for installation to proceed with + CRDS=("VolumeSnapshotClasses" "VolumeSnapshotContents" "VolumeSnapshots") + for C in "${CRDS[@]}"; do + # Verify that alpha snapshot related CRDs/CRs are not there on the system. + kubectl explain ${C} 2> /dev/null | grep "^VERSION.*v1alpha1$" --quiet + if [ $? -eq 0 ]; then + error=1 + found_error "The alpha CRD for ${C} is installed. Please uninstall it" + if [[ $(kubectl get ${C} -A --no-headers 2>/dev/null | wc -l) -ne 0 ]]; then + found_error " Found CR for alpha CRD ${C}. Please delete it" + fi + fi + done + check_error error +} + +# verify that the requirements for beta snapshot support exist +function verify_beta_snap_requirements() { + log step "Verifying beta snapshot support" + echo + log arrow + log smart_step "Verifying that beta snapshot CRDs are available" "small" + + error=0 + # check for the CRDs. These are required for installation + CRDS=("VolumeSnapshotClasses" "VolumeSnapshotContents" "VolumeSnapshots") + for C in "${CRDS[@]}"; do + # Verify if snapshot related CRDs are there on the system. If not install them. + kubectl explain ${C} 2> /dev/null | grep "^VERSION.*v1beta1$" --quiet + if [ $? -ne 0 ]; then + error=1 + if [ "${INSTALL_CRD}" == "yes" ]; then + found_warning "The beta CRD for ${C} is not installed. They will be installed because --snapshot-crd was specified" + else + found_error "The beta CRD for ${C} is not installed. 
These can be installed by specifying --snapshot-crd during installation" + fi + fi + done + check_error error + + log arrow + log smart_step "Verifying that beta snapshot controller is available" "small" + + error=0 + # check for the snapshot-controller. These are strongly suggested but not required + kubectl get pods -A | grep snapshot-controller --quiet + if [ $? -ne 0 ]; then + error=1 + found_warning "The Snapshot Controller does not seem to be deployed. The Snapshot Controller should be provided by the Kubernetes vendor or administrator." + fi + + check_error error +} + +# verify that helm is v3 or above +function verify_helm_3() { + log step "Verifying helm version" + + error=0 + # Check helm installer version + helm --help >&/dev/null || { + found_error "helm is required for installation" + log step_failure + return + } + + helm version | grep "v3." --quiet + if [ $? -ne 0 ]; then + error=1 + found_error "Driver installation is supported only using helm 3" + fi + + check_error error +} + +# found_error, installation will not continue +function found_error() { + for N in "$@"; do + ERRORS+=("${N}") + done +} + +# found_warning, installation can continue +function found_warning() { + for N in "$@"; do + WARNINGS+=("${N}") + done +} + +# Print a nice summary at the end +function summary() { + echo + log section "Verification Complete" + # print all the WARNINGS + NON_CRD_WARNINGS=0 + if [ "${#WARNINGS[@]}" -ne 0 ]; then + log warnings + for E in "${WARNINGS[@]}"; do + echo "- ${E}" + echo ${E} | grep --quiet "^The beta CRD for VolumeSnapshot" + if [ $? 
-ne 0 ]; then + NON_CRD_WARNINGS=1 + fi + done + RC=$EXIT_WARNING + if [ "${INSTALL_CRD}" == "yes" -a ${NON_CRD_WARNINGS} -eq 0 ]; then + RC=$EXIT_SUCCESS + fi + fi + + # print all the ERRORS + if [ "${#ERRORS[@]}" -ne 0 ]; then + log errors + for E in "${ERRORS[@]}"; do + echo "- ${E}" + done + RC=$EXIT_ERROR + fi + + return $RC +} + +# +# validate_params will validate the parameters passed in +function validate_params() { + # make sure the driver was specified + if [ -z "${DRIVER}" ]; then + echo "No driver specified" + usage + exit 1 + fi + # make sure the driver name is valid + if [[ ! "${VALIDDRIVERS[@]}" =~ "${DRIVER}" ]]; then + echo "Driver: ${DRIVER} is invalid." + echo "Valid options are: ${VALIDDRIVERS[@]}" + usage + exit 1 + fi + # the namespace is required + if [ -z "${NS}" ]; then + echo "No namespace specified" + usage + exit 1 + fi + # values file + if [ -z "${VALUES}" ]; then + echo "No values file was specified" + usage + exit 1 + fi + if [ ! -f "${VALUES}" ]; then + echo "Unable to read values file at: ${VALUES}" + usage + exit 1 + fi +} + +# +# main +# +# default values + +NODE_VERIFY=1 + +# exit codes +EXIT_SUCCESS=0 +EXIT_WARNING=1 +EXIT_ERROR=99 + +# arrays of messages +WARNINGS=() +ERRORS=() + +INSTALL_CRD="no" + +# make sure kubectl is available +kubectl --help >&/dev/null || { + echo "kubectl required for verification... exiting" + exit $EXIT_ERROR +} + +# Determine the nodes +MINION_NODES=$(kubectl get nodes -o wide | grep -v -e master -e INTERNAL | awk ' { print $6; }') +MASTER_NODES=$(kubectl get nodes -o wide | awk ' /master/{ print $6; }') +# Get the kubernetes major and minor version numbers. 
+kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/[^0-9].*//g') +kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/[^0-9].*//g') + +# get the list of valid CSI Drivers, this will be the list of directories in drivers/ that contain helm charts +get_drivers "${SCRIPTDIR}/../helm" +# if only one driver was found, set the DRIVER to that one +if [ ${#VALIDDRIVERS[@]} -eq 1 ]; then + DRIVER="${VALIDDRIVERS[0]}" +fi + +while getopts ":h-:" optchar; do + case "${optchar}" in + -) + case "${OPTARG}" in + # INSTALL_CRD. Signifies that we were asked to install the CRDs + snapshot-crd) + INSTALL_CRD="yes" + ;; + skip-verify-node) + NODE_VERIFY=0 + ;; + # NAMESPACE + namespace) + NS="${!OPTIND}" + if [[ -z ${NS} || ${NS} == "--skip-verify" ]]; then + NS=${DEFAULT_NS} + else + OPTIND=$((OPTIND + 1)) + fi + ;; + namespace=*) + NS=${OPTARG#*=} + if [[ -z ${NS} ]]; then NS=${DEFAULT_NS}; fi + ;; + # RELEASE + release) + RELEASE="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + release=*) + RELEASE=${OPTARG#*=} + ;; + # VALUES + values) + VALUES="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + values=*) + VALUES=${OPTARG#*=} + ;; + # NODEUSER + node-verify-user) + NODEUSER="${!OPTIND}" + OPTIND=$((OPTIND + 1)) + ;; + node-verify-user=*) + NODEUSER=${OPTARG#*=} + ;; + *) + echo "Unknown option --${OPTARG}" + echo "For help, run $PROG -h" + exit $EXIT_ERROR + ;; + esac + ;; + h) + usage + ;; + *) + echo "Unknown option -${OPTARG}" + echo "For help, run $PROG -h" + exit $EXIT_ERROR + ;; + esac +done + +# by default the NAME of the helm release of the driver is the same as the driver name +RELEASE=$(get_release_name "${DRIVER}") + +#"${RELEASE:-$DRIVER}" +# by default, NODEUSER is root +NODEUSER="${NODEUSER:-root}" + +# validate the parameters passed in +validate_params "${MODE}" + +verify-driver "${DRIVER}" +exit $? 
diff --git a/docker.mk b/docker.mk index f04477c4..43f2e01b 100644 --- a/docker.mk +++ b/docker.mk @@ -1,15 +1,25 @@ +# docker makefile, included from Makefile, will build/push images with docker or podman +# + # Includes the following generated file to get semantic version information include semver.mk + ifdef NOTES RELNOTE="-$(NOTES)" else RELNOTE= endif +ifeq ($(IMAGETAG),) +IMAGETAG="v$(MAJOR).$(MINOR).$(PATCH)$(RELNOTE)" +endif + + docker: - echo "MAJOR $(MAJOR) MINOR $(MINOR) PATCH $(PATCH) RELNOTE $(RELNOTE) SEMVER $(SEMVER)" - docker build -t "artifactory-sio.isus.emc.com:8129/csi-vxflexos:v$(MAJOR).$(MINOR).$(PATCH)$(RELNOTE)" . + @echo "Base Images is set to: $(BASEIMAGE)" + @echo "Building: $(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" + $(BUILDER) build -t "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" --target $(BUILDSTAGE) --build-arg GOPROXY --build-arg BASEIMAGE=$(BASEIMAGE) --build-arg GOVERSION=$(GOVERSION) . push: - echo "MAJOR $(MAJOR) MINOR $(MINOR) PATCH $(PATCH) RELNOTE $(RELNOTE) SEMVER $(SEMVER)" - docker push "artifactory-sio.isus.emc.com:8129/csi-vxflexos:v$(MAJOR).$(MINOR).$(PATCH)$(RELNOTE)" + @echo "Pushing: $(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" + $(BUILDER) push "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" diff --git a/go.mod b/go.mod index 3fab8cca..f50cb6c6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/dell/csi-vxflexos // In order to run unit tests on Windows, you need a stubbed Windows implementation // of the gofsutil package. Use the following replace statements if necessary. 
+//replace github.com/dell/gofsutil => ./gofsutil +//replace github.com/dell/goscaleio => ./goscaleio go 1.13 @@ -11,8 +13,8 @@ require ( github.com/DATA-DOG/godog v0.7.13 github.com/akutz/memconn v0.1.0 github.com/container-storage-interface/spec v1.1.0 - github.com/dell/gofsutil v1.2.0 - github.com/dell/goscaleio v1.1.0 + github.com/dell/gofsutil v1.3.0 + github.com/dell/goscaleio v1.2.0 github.com/gogo/protobuf v1.2.0 // indirect github.com/golang/protobuf v1.3.1 github.com/gorilla/context v1.1.1 // indirect diff --git a/go.sum b/go.sum index 8bf2b148..e9aac868 100644 --- a/go.sum +++ b/go.sum @@ -27,12 +27,10 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dell/gofsutil v1.2.0 h1:FLvXjNm/mA7y0zpIVgzU61wYIJmNd4VNj8PuY7AR2kw= -github.com/dell/gofsutil v1.2.0/go.mod h1:48eHpMRl0+07uGEnQ7/RE6pTOAVEl74utlGjd0QX/Os= -github.com/dell/goscaleio v1.0.0 h1:YLBcFyK3jQ6wBiFIm6kEE3TsHuTitukAdY14+S7RsBY= -github.com/dell/goscaleio v1.0.0/go.mod h1:gVEFhMQQAwmZsrLNLhcCc8emCMZju9lNcfFVw2H5bss= -github.com/dell/goscaleio v1.1.0 h1:Dk8nGODKnqSO+Qm6fTAgq+izywflH807sWSJ2j+gaQM= -github.com/dell/goscaleio v1.1.0/go.mod h1:gVEFhMQQAwmZsrLNLhcCc8emCMZju9lNcfFVw2H5bss= +github.com/dell/gofsutil v1.3.0 h1:6iDzLAdvrusB5p1yxsW45D2bC9+PUX64tJhH3tgGBN8= +github.com/dell/gofsutil v1.3.0/go.mod h1:48eHpMRl0+07uGEnQ7/RE6pTOAVEl74utlGjd0QX/Os= +github.com/dell/goscaleio v1.2.0 h1:97x2rM0cRlBhy3povQe1OhxF4uI9vCgjRb/o19nP2d0= +github.com/dell/goscaleio v1.2.0/go.mod h1:xrLhA17HgAXG616N7jQOatzVuxeZ5rfYsGSUBaQ7U8I= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= @@ -50,7 +48,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -71,6 +68,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jarcoal/httpmock v1.0.6/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= @@ -118,7 +116,6 @@ github.com/rexray/gocsi v1.1.0/go.mod h1:kr6L70GxUU6Gu8ehq2dWQmwdILR1tmE05c/OYaT github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.3/go.mod 
h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -128,7 +125,6 @@ github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -146,19 +142,15 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20171023145632-2509b142fb2b h1:vXxKaRjFiMao1tDygYZfT9iEZkE49b7scEND45gopd0= golang.org/x/crypto v0.0.0-20171023145632-2509b142fb2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= @@ -170,10 +162,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20171028101351-661970f62f58/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94= golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -183,7 +173,6 @@ golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b h1:qMK98NmNCRVDIYFycQ5yVRkvgDUFfdP8Ip4KqmDEB7g= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= diff --git a/helm/betasnapshotclass.yaml b/helm/betasnapshotclass.yaml new file mode 100644 index 00000000..a8572875 --- /dev/null +++ b/helm/betasnapshotclass.yaml @@ -0,0 +1,6 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: vxflexos-snapclass +driver: csi-vxflexos.dellemc.com +deletionPolicy: Delete diff --git a/helm/common.bash b/helm/common.bash deleted file mode 100644 index db38813a..00000000 --- a/helm/common.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Verify kubeadm and kubectl present -kubectl --help >&/dev/null || { - echo "kubectl required for installation... exiting"; exit 2 -} -kubeadm --help >&/dev/null || { - echo "kubeadm required for installation... 
exiting"; exit 2 -} - -waitOnRunning() { - TARGET=$(kubectl get pods -n ${NS} | grep ${NS} | wc -l) - RUNNING=0 - while [ $RUNNING -ne $TARGET ]; - do - sleep 10 - TARGET=$(kubectl get pods -n ${NS} | grep ${NS} | wc -l) - RUNNING=$(kubectl get pods -n ${NS} | grep "Running" | wc -l) - date - echo running $RUNNING / $TARGET - kubectl get pods -n ${NS} - done -} - -# Get the kubernetes major and minor version numbers. -kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/",.*//') -kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/",.*//') - diff --git a/helm/csi-vxflexos/Chart.yaml b/helm/csi-vxflexos/Chart.yaml index 04646a38..71642f1f 100644 --- a/helm/csi-vxflexos/Chart.yaml +++ b/helm/csi-vxflexos/Chart.yaml @@ -1,6 +1,7 @@ name: csi-vxflexos -version: 1.1.5 -appVersion: 1.1.5 +version: 1.2.0 +appVersion: 1.2.0 +apiVersion: v2 description: | VxFlex OS CSI (Container Storage Interface) driver Kubernetes integration. This chart includes everything required to provision via CSI as diff --git a/helm/csi-vxflexos/driver-image.yaml b/helm/csi-vxflexos/driver-image.yaml new file mode 100644 index 00000000..f317bcd2 --- /dev/null +++ b/helm/csi-vxflexos/driver-image.yaml @@ -0,0 +1,4 @@ +# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED. +images: + # "images.driver" defines the container images used for the driver container. + driver: dellemc/csi-vxflexos:v1.2.0.000R diff --git a/helm/csi-vxflexos/k8s-1.14-values.yaml b/helm/csi-vxflexos/k8s-1.17-values.yaml similarity index 62% rename from helm/csi-vxflexos/k8s-1.14-values.yaml rename to helm/csi-vxflexos/k8s-1.17-values.yaml index 1a70f067..4a43ca75 100644 --- a/helm/csi-vxflexos/k8s-1.14-values.yaml +++ b/helm/csi-vxflexos/k8s-1.17-values.yaml @@ -1,20 +1,22 @@ # IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED. 
-kubeversion: "v1.14" +kubeversion: "v1.17" images: # "images.attacher" defines the container images used for the csi attacher # container. - attacher: quay.io/k8scsi/csi-attacher:v1.2.1 + attacher: quay.io/k8scsi/csi-attacher:v2.2.0 # "images.provisioner" defines the container images used for the csi provisioner # container. - #provisioner: quay.io/k8scsi/csi-provisioner:v0.4.2 #for CSI 0.3.0 only (obsolete) - provisioner: quay.io/k8scsi/csi-provisioner:v1.2.1 + provisioner: quay.io/k8scsi/csi-provisioner:v1.5.0 # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: quay.io/k8scsi/csi-snapshotter:v1.2.2 + snapshotter: quay.io/k8scsi/csi-snapshotter:v2.1.1 # "images.registrar" defines the container images used for the csi registrar # container. registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 + # "images.resizer" defines the container images used for the csi resizer + #container. + resizer: quay.io/k8scsi/csi-resizer:v0.5.0 diff --git a/helm/csi-vxflexos/k8s-1.13-values.yaml b/helm/csi-vxflexos/k8s-1.18-values.yaml similarity index 54% rename from helm/csi-vxflexos/k8s-1.13-values.yaml rename to helm/csi-vxflexos/k8s-1.18-values.yaml index 6e915f1d..77e8cf68 100644 --- a/helm/csi-vxflexos/k8s-1.13-values.yaml +++ b/helm/csi-vxflexos/k8s-1.18-values.yaml @@ -1,19 +1,23 @@ # IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED. -kubeversion: "v1.13" +kubeversion: "v1.18" images: # "images.attacher" defines the container images used for the csi attacher # container. - attacher: quay.io/k8scsi/csi-attacher:v1.0.1 + attacher: quay.io/k8scsi/csi-attacher:v2.2.0 # "images.provisioner" defines the container images used for the csi provisioner # container. 
- provisioner: quay.io/k8scsi/csi-provisioner:v1.0.1 + provisioner: quay.io/k8scsi/csi-provisioner:v1.6.0 # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: quay.io/k8scsi/csi-snapshotter:v1.0.1 + snapshotter: quay.io/k8scsi/csi-snapshotter:v2.1.1 # "images.registrar" defines the container images used for the csi registrar # container. - registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 + registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 + + # "images.resizer" defines the container images used for the csi resizer + #container. + resizer: quay.io/k8scsi/csi-resizer:v0.5.0 diff --git a/helm/csi-vxflexos/k8s-1.16-values.yaml b/helm/csi-vxflexos/k8s-1.19-values.yaml similarity index 53% rename from helm/csi-vxflexos/k8s-1.16-values.yaml rename to helm/csi-vxflexos/k8s-1.19-values.yaml index b020b210..25a445a9 100644 --- a/helm/csi-vxflexos/k8s-1.16-values.yaml +++ b/helm/csi-vxflexos/k8s-1.19-values.yaml @@ -1,18 +1,23 @@ -# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DONLOADED. -kubeversion: "v1.16" +# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED. +kubeversion: "v1.19" images: # "images.attacher" defines the container images used for the csi attacher # container. - attacher: quay.io/k8scsi/csi-attacher:v1.2.1 + attacher: quay.io/k8scsi/csi-attacher:v2.2.0 # "images.provisioner" defines the container images used for the csi provisioner # container. - provisioner: quay.io/k8scsi/csi-provisioner:v1.4.0 + provisioner: quay.io/k8scsi/csi-provisioner:v1.6.0 # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: quay.io/k8scsi/csi-snapshotter:v1.2.0 + snapshotter: quay.io/k8scsi/csi-snapshotter:v2.1.1 # "images.registrar" defines the container images used for the csi registrar # container. registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 + + # "images.resizer" defines the container images used for the csi resizer + # container. 
+ resizer: quay.io/k8scsi/csi-resizer:v0.5.0 + diff --git a/helm/csi-vxflexos/templates/controller.yaml b/helm/csi-vxflexos/templates/controller.yaml index 063cd0fc..4aec8cfc 100644 --- a/helm/csi-vxflexos/templates/controller.yaml +++ b/helm/csi-vxflexos/templates/controller.yaml @@ -20,7 +20,10 @@ rules: verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] - apiGroups: [""] resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] @@ -47,8 +50,8 @@ rules: resources: ["volumesnapshots"] verbs: ["get", "list", "watch", "update"] - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots/status"] - verbs: ["update"] + resources: ["volumesnapshots/status","volumesnapshotcontents/status"] + verbs: ["get", "list", "watch", "update"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["create", "list", "watch", "delete", "update"] @@ -75,8 +78,6 @@ spec: selector: matchLabels: app: {{ .Release.Name }}-controller - updateStrategy: - type: RollingUpdate serviceName: {{ .Release.Name }}-controller replicas: {{ required "Must provide the number of controller instances to create." .Values.controllerCount }} template: @@ -106,8 +107,7 @@ spec: {{- if eq .Values.kubeversion "v1.13" }} - "--connection-timeout=300s" - "--provisioner=csi-vxflexos.dellemc.com" - {{- end}} - {{- if eq .Values.kubeversion "v1.14" }} + {{- else }} - "--timeout=120s" {{- end}} - "--v=5" @@ -131,6 +131,17 @@ spec: volumeMounts: - name: socket-dir mountPath: /var/run/csi + - name: resizer + image: {{ required "Must provide the CSI resizer container image." 
.Values.images.resizer }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: /var/run/csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/run/csi - name: driver image: {{ required "Must provide the VxFlex OS driver container image." .Values.images.driver }} imagePullPolicy: Always diff --git a/helm/csi-vxflexos/templates/node.yaml b/helm/csi-vxflexos/templates/node.yaml index fcc75ea4..ada4b405 100644 --- a/helm/csi-vxflexos/templates/node.yaml +++ b/helm/csi-vxflexos/templates/node.yaml @@ -90,11 +90,16 @@ spec: secretKeyRef: name: {{ .Release.Name }}-creds key: password + - name: X_CSI_VXFLEXOS_ENDPOINT + value: {{ required "Must provide a VxFlex OS REST API gateway HTTPS endpoint." .Values.restGateway }} + - name: X_CSI_VXFLEXOS_INSECURE + value: "true" volumeMounts: - name: driver-path mountPath: /var/lib/kubelet/plugins/vxflexos.emc.dell.com - name: volumedevices-path mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices + mountPropagation: "Bidirectional" - name: pods-path mountPath: /var/lib/kubelet/pods mountPropagation: "Bidirectional" diff --git a/helm/csi-vxflexos/templates/snapclass.yaml b/helm/csi-vxflexos/templates/snapclass.yaml new file mode 100644 index 00000000..a8572875 --- /dev/null +++ b/helm/csi-vxflexos/templates/snapclass.yaml @@ -0,0 +1,6 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: vxflexos-snapclass +driver: csi-vxflexos.dellemc.com +deletionPolicy: Delete diff --git a/helm/csi-vxflexos/templates/storageclass-xfs.yaml b/helm/csi-vxflexos/templates/storageclass-xfs.yaml index db0ee3b5..6a34f380 100644 --- a/helm/csi-vxflexos/templates/storageclass-xfs.yaml +++ b/helm/csi-vxflexos/templates/storageclass-xfs.yaml @@ -5,6 +5,15 @@ metadata: annotations: provisioner: csi-vxflexos.dellemc.com reclaimPolicy: {{ required "Must provide a storage class reclaim policy." 
.Values.storageClass.reclaimPolicy }} +allowVolumeExpansion: true parameters: storagepool: {{ required "Must provide a VxFlex OS storage pool name." .Values.storagePool }} FsType: xfs +volumeBindingMode: WaitForFirstConsumer +allowedTopologies: +- matchLabelExpressions: + - key: csi-vxflexos.dellemc.com/{{ required "Must provide the VxFlex OS system Name." .Values.systemName }} + values: + - csi-vxflexos.dellemc.com + + diff --git a/helm/csi-vxflexos/templates/storageclass.yaml b/helm/csi-vxflexos/templates/storageclass.yaml index 3e1a06c7..81b696e1 100644 --- a/helm/csi-vxflexos/templates/storageclass.yaml +++ b/helm/csi-vxflexos/templates/storageclass.yaml @@ -6,5 +6,12 @@ metadata: storageclass.beta.kubernetes.io/is-default-class: {{ .Values.storageClass.isDefault | quote }} provisioner: csi-vxflexos.dellemc.com reclaimPolicy: {{ required "Must provide a storage class reclaim policy." .Values.storageClass.reclaimPolicy }} +allowVolumeExpansion: true parameters: storagepool: {{ required "Must provide a VxFlex OS storage pool name." .Values.storagePool }} +volumeBindingMode: WaitForFirstConsumer +allowedTopologies: +- matchLabelExpressions: + - key: csi-vxflexos.dellemc.com/{{ required "Must provide the VxFlex OS system Name." .Values.systemName }} + values: + - csi-vxflexos.dellemc.com diff --git a/helm/csi-vxflexos/values.yaml b/helm/csi-vxflexos/values.yaml index 9ab25714..3fc575a4 100644 --- a/helm/csi-vxflexos/values.yaml +++ b/helm/csi-vxflexos/values.yaml @@ -45,27 +45,3 @@ storageClass: # "storageClass.reclaimPolicy" defines what will happen when a volume is # removed from the Kubernetes API. Valid values are "Retain" and "Delete". reclaimPolicy: Delete - -# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DONLOADED. -images: - # "images.driver" defines the container images used for the driver container. - driver: dellemc/csi-vxflexos:v1.1.5.000R - - # "images.attacher" defines the container images used for the csi attacher - # container. 
- #attacher: quay.io/k8scsi/csi-attacher:v0.4.2 #for CSI 0.3.0 only (obsolete) - attacher: quay.io/k8scsi/csi-attacher:v1.0.1 #doesn't work with CSI 0.3.0 driver - - # "images.provisioner" defines the container images used for the csi provisioner - # container. - #provisioner: quay.io/k8scsi/csi-provisioner:v0.4.2 #for CSI 0.3.0 only (obsolete) - provisioner: quay.io/k8scsi/csi-provisioner:v1.0.1 #doesn't work with CSI 0.3.0 driver - - # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: quay.io/k8scsi/csi-snapshotter:v1.0.1 - - # "images.registrar" defines the container images used for the csi registrar - # container. - #registrar: quay.io/k8scsi/driver-registrar:v0.4.2 #for CSI 0.3.0 only (obsolete) - registrar: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 #doesn't work with CSI 0.3.0 driver - diff --git a/helm/install.vxflexos b/helm/install.vxflexos deleted file mode 100755 index a97f7dd9..00000000 --- a/helm/install.vxflexos +++ /dev/null @@ -1,59 +0,0 @@ -#/bin/bash -# Verify the kubernetes installation has the feature gates needed. -export NS=vxflexos -source ./common.bash - -sh ./verify.kubernetes -rc=$? -if [ $rc -ne 0 ] ; - then echo "*******************************************************************************" - echo "Warning: Kubernetes --feature-gates not correctly configured... it may not work" - echo "*******************************************************************************" - sleep 5 -fi - - -# Make sure the vxflexos-cred has already been established -echo kubectl get secrets -n vxflexos | grep vxflexos-cred --quiet -kubectl get secrets -n vxflexos | grep vxflexos-cred --quiet -if [ $? -ne 0 ]; - then echo "*** YOU MUST PROVIDE VxFlex OS credentials in a Kubernetes secret- see secret.yaml template ***" - exit 2 -fi -# Check for required CustomResourceDefinitions -if [ $kMinorVersion == "13" ]; -then - kubectl get customresourcedefinitions | grep csidrivers --quiet - if [ $? 
-ne 0 ]; - then echo "installing csidriver CRD"; kubectl create -f csidriver.yaml - fi - kubectl get customresourcedefinitions | grep nodeinfo --quiet - if [ $? -ne 0 ]; - then echo "installing nodeinfo CRD"; kubectl create -f nodeinfo.yaml - fi -fi - - -echo helm install --values myvalues.yaml --values csi-vxflexos/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml --namespace vxflexos vxflexos ./csi-vxflexos -helm install --values myvalues.yaml --values csi-vxflexos/k8s-${kMajorVersion}.${kMinorVersion}-values.yaml --namespace vxflexos vxflexos ./csi-vxflexos - -waitOnRunning - - -echo "CSIDrivers:" -kubectl get csidrivers - -if [ $kMinorVersion == "13" ]; -then - echo "CSINodeInfos:" - kubectl get csinodeinfos -fi - -echo "StorageClasses:" -kubectl get storageclass -kubectl get volumesnapshotclass | grep vxflexos-snapclass --quiet -if [ $? -ne 0 ]; - then echo "installing volumesnapshotclass"; kubectl create -f volumesnapshotclass.yaml -fi -echo "VolumeSnapshotClasses:" -kubectl get volumesnapshotclass diff --git a/helm/uninstall.vxflexos b/helm/uninstall.vxflexos deleted file mode 100755 index 152d0339..00000000 --- a/helm/uninstall.vxflexos +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -helm delete -n vxflexos vxflexos -sleep 10 -kubectl get pods -n vxflexos -kubectl get volumesnapshotclass | grep vxflexos-snapclass --quiet -if [ $? 
-eq 0 ]; - then kubectl delete volumesnapshotclass vxflexos-snapclass -fi diff --git a/helm/verify.kubernetes b/helm/verify.kubernetes deleted file mode 100755 index a20e9508..00000000 --- a/helm/verify.kubernetes +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/sh - -# Determine the kubernetes version -kubeversion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*GitVersion:"//' -e 's/",.*//') -echo Kubernetes version $kubeversion - -# Determine the nodes -MINION_NODES=$(kubectl get nodes -o wide | grep -v -e master -e INTERNAL | awk ' { print $6; }') -MASTER_NODES=$(kubectl get nodes -o wide | awk ' /master/{ print $6; }') -echo Kubernetes master nodes: $MASTER_NODES -echo Kubernetes minion nodes: $MINION_NODES - -echo Verifying the SDC installation. -sdcfail=0 -for node in $MINION_NODES -do - ssh $node /opt/emc/scaleio/sdc/bin/drv_cfg --query_mdm - rv=$? - if [ $rv -ne 0 ]; then - echo "*******************************************************************" - echo "Node $node doees not have the SDC installed"; - echo "*******************************************************************" - sdcfail=1; - fi - -done -if [ $sdcfail -ne 0 ]; then echo "YOU MUST INSTALL THE VXFLEXOS SDC ON ALL MINION (WORKER) NODES"; exit 2; fi - -# Variables used for verification -if [ "${kubeversion}" == "13" ]; then -FEATURE_GATES="VolumeSnapshotDataSource CSINodeInfo CSIDriverRegistry CSIBlockVolume" -else -FEATURE_GATES="VolumeSnapshotDataSource" -fi - -MASTER_PROCS="kubelet kube-apiserver kube-scheduler kube-controller-manager" -MINION_PROCS="kubelet" -fail=0 - -echo Verifying the feature gates. -for node in $MASTER_NODES -do - echo ssh $node ps -ef >.ps.out - ssh $node ps -ef >.ps.out - for gate in $FEATURE_GATES - do - #echo checking $node for $gate ... 
- for proc in $MASTER_PROCS - do - #echo proc $proc - count=$(grep -e " $proc" -e "/$proc" .ps.out | grep -c $gate) - #echo $node $gate $proc $count - [ $count -ne "1" ] && { echo "node $node proc $proc gate $gate failed"; fail=1; } - done - done -done - -for node in $MINION_NODES -do - echo ssh $node ps -ef >.ps.out - ssh $node ps -ef >.ps.out - for gate in $FEATURE_GATES - do - #echo checking $node for $gate ... - for proc in $MINION_PROCS - do - #echo proc $proc - count=$(grep -e " $proc" -e "/$proc" .ps.out | grep -c $gate) - #echo $node $gate $proc $count - [ $count -ne "1" ] && { echo "node $node proc $proc gate $gate failed"; fail=1; } - done - done -done - -echo fail: $fail -exit $fail diff --git a/helm/volumesnapshotclass.yaml b/helm/volumesnapshotclass.yaml deleted file mode 100644 index ea10263a..00000000 --- a/helm/volumesnapshotclass.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: snapshot.storage.k8s.io/v1alpha1 -kind: VolumeSnapshotClass -metadata: - name: vxflexos-snapclass -snapshotter: csi-vxflexos.dellemc.com diff --git a/licenses/LICENSE b/licenses/LICENSE new file mode 100644 index 00000000..81defdc3 --- /dev/null +++ b/licenses/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + \ No newline at end of file diff --git a/overrides.mk b/overrides.mk new file mode 100644 index 00000000..0bceb6ee --- /dev/null +++ b/overrides.mk @@ -0,0 +1,65 @@ +# overrides file +# this file, included from the Makefile, will overlay default values with environment variables +# + +# DEFAULT values +DEFAULT_BASEIMAGE="registry.access.redhat.com/ubi7/ubi" +DEFAULT_GOVERSION="1.13.12" +DEFAULT_REGISTRY="sample_registry" +DEFAULT_IMAGENAME="csi-vxflexos" +DEFAULT_BUILDSTAGE="final" + +# set the BASEIMAGE if needed +ifeq ($(BASEIMAGE),) +export BASEIMAGE="$(DEFAULT_BASEIMAGE)" +endif + +# set the GOVERSION if needed +ifeq ($(GOVERSION),) +export GOVERSION="$(DEFAULT_GOVERSION)" +endif + +# set the REGISTRY if needed +ifeq ($(REGISTRY),) +export REGISTRY="$(DEFAULT_REGISTRY)" +endif + +# set the IMAGENAME if needed +ifeq ($(IMAGENAME),) +export IMAGENAME="$(DEFAULT_IMAGENAME)" +endif + +# set the BUILDSTAGE if needed +ifeq ($(BUILDSTAGE),) +export BUILDSTAGE="$(DEFAULT_BUILDSTAGE)" +endif + +# figure out if podman or docker should be used (use podman if found) +ifneq (, $(shell which podman 2>/dev/null)) +export BUILDER=podman +else +export BUILDER=docker +endif + +# target to print some help regarding these overrides and how to use them +overrides-help: + @echo + @echo "The following environment variables can be set to control the build" + @echo + @echo 
"GOVERSION - The version of Go to build with, default is: $(DEFAULT_GOVERSION)" + @echo " Current setting is: $(GOVERSION)" + @echo "BASEIMAGE - The base container image to build from: $(DEFAULT_BASEIMAGE)" + @echo " Current setting is: $(BASEIMAGE)" + @echo "REGISTRY - The registry to push images to, default is: $(DEFAULT_REGISTRY)" + @echo " Current setting is: $(REGISTRY)" + @echo "IMAGENAME - The image name to be built, defaut is: $(DEFAULT_IMAGENAME)" + @echo " Current setting is: $(IMAGENAME)" + @echo "IMAGETAG - The image tag to be built, default is an empty string which will determine the tag by examining annotated tags in the repo." + @echo " Current setting is: $(IMAGETAG)" + @echo "BUILDSTAGE - The Dockerfile build stage to execute, default is: $(DEFAULT_BUILDSTAGE)" + @echo " Stages can be found by looking at the Dockerfile" + @echo " Current setting is: $(BUILDSTAGE)" + @echo + + + diff --git a/service/controller.go b/service/controller.go index 734ecda9..3578001e 100644 --- a/service/controller.go +++ b/service/controller.go @@ -9,6 +9,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" csi "github.com/container-storage-interface/spec/lib/go/csi" @@ -80,10 +81,32 @@ func (s *service) CreateVolume( return nil, err } - // AccessibleTopology not currently supported + // validate AccessibleTopology accessibility := req.GetAccessibilityRequirements() - if accessibility != nil { - return nil, status.Errorf(codes.InvalidArgument, "Volume AccessibilityRequirements is not currently supported") + requestedSystem := "" + + if accessibility != nil && len(accessibility.GetPreferred()) > 0 { + + segments := accessibility.GetPreferred()[0].GetSegments() + for key := range segments { + if strings.HasPrefix(key, Name) { + tokens := strings.Split(key, "/") + constraint := "" + if len(tokens) > 1 { + constraint = tokens[1] + } + log.Printf("Found topology constraint: VxFlex OS 
system: %s", constraint) + if constraint == s.system.System.ID { + requestedSystem = s.system.System.ID + } + } + } + + // validate that the system name required matches the systemname that we know about + if len(segments) > 0 && requestedSystem == "" { + return nil, status.Errorf(codes.InvalidArgument, + "Requested System %s is unknown to this controller which is managing System %s", requestedSystem, s.system.System.ID) + } } params := req.GetParameters() @@ -251,6 +274,8 @@ func (s *service) createVolumeFromSnapshot(req *csi.CreateVolumeRequest, if vol.Name == name && vol.StoragePoolID == srcVol.StoragePoolID { log.Printf("Requested volume %s already exists", name) csiVolume := s.getCSIVolume(vol) + csiVolume.ContentSource = req.GetVolumeContentSource() + copyInterestingParameters(req.GetParameters(), csiVolume.VolumeContext) log.Printf("Requested volume (from snap) already exists %s (%s) storage pool %s", csiVolume.VolumeContext["Name"], csiVolume.VolumeId, csiVolume.VolumeContext["StoragePoolName"]) return &csi.CreateVolumeResponse{Volume: csiVolume}, nil @@ -1097,6 +1122,13 @@ func (s *service) ControllerGetCapabilities( }, }, }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }, }, }, nil } @@ -1416,7 +1448,79 @@ func (s *service) DeleteSnapshotConsistencyGroup( } func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") + + var reqID string + var err error + headers, ok := metadata.FromIncomingContext(ctx) + if ok { + if req, ok := headers["csi.requestid"]; ok && len(req) > 0 { + reqID = req[0] + } + } + + if err := s.requireProbe(ctx); err != nil { + return nil, err + } + + volID := req.GetVolumeId() + if volID == "" { + return nil, status.Error(codes.InvalidArgument, + "Volume ID is required") + } + 
+ vol, err := s.getVolByID(volID) + if err != nil { + if strings.EqualFold(err.Error(), sioGatewayVolumeNotFound) || strings.Contains(err.Error(), "must be a hexadecimal number") { + return nil, status.Error(codes.NotFound, "volume not found") + } + return nil, status.Errorf(codes.Internal, "failure to load volume: %s", err.Error()) + } + + volName := vol.Name + cr := req.GetCapacityRange() + log.Printf("cr:%d", cr) + requestedSize, err := validateVolSize(cr) + if err != nil { + return nil, err + } + log.Printf("req.size:%d", requestedSize) + fields := map[string]interface{}{ + "RequestID": reqID, + "VolumeName": volName, + "RequestedSize": requestedSize, + } + log.WithFields(fields).Info("Executing ExpandVolume with following fields") + allocatedSize := int64(vol.SizeInKb) + log.Printf("allocatedsize:%d", allocatedSize) + + if requestedSize < allocatedSize { + return &csi.ControllerExpandVolumeResponse{}, nil + } + + if requestedSize == allocatedSize { + log.Infof("Idempotent call detected for volume (%s) with requested size (%d) SizeInKb and allocated size (%d) SizeInKb", + volName, requestedSize, allocatedSize) + return &csi.ControllerExpandVolumeResponse{ + CapacityBytes: requestedSize * bytesInKiB, + NodeExpansionRequired: true}, nil + } + + reqSize := requestedSize / kiBytesInGiB + tgtVol := goscaleio.NewVolume(s.adminClient) + tgtVol.Volume = vol + err = tgtVol.SetVolumeSize(strconv.Itoa(int(reqSize))) + if err != nil { + log.Errorf("Failed to execute ExpandVolume() with error (%s)", err.Error()) + return nil, status.Error(codes.Internal, err.Error()) + } + + //return the response with NodeExpansionRequired = true, so that CO could call + // NodeExpandVolume subsequently + csiResp := &csi.ControllerExpandVolumeResponse{ + CapacityBytes: requestedSize * bytesInKiB, + NodeExpansionRequired: true, + } + return csiResp, nil } func mergeStringMaps(base map[string]string, additional map[string]string) map[string]string { diff --git a/service/envvars.go 
b/service/envvars.go index 98ba48f3..bede71de 100644 --- a/service/envvars.go +++ b/service/envvars.go @@ -1,33 +1,34 @@ package service const ( - // EnvEndpoint is the name of the enviroment variable used to set the + // EnvEndpoint is the name of the environment variable used to set the // HTTP endpoint of the ScaleIO Gateway EnvEndpoint = "X_CSI_VXFLEXOS_ENDPOINT" - // EnvUser is the name of the enviroment variable used to set the + // EnvUser is the name of the environment variable used to set the // username when authenticating to the ScaleIO Gateway EnvUser = "X_CSI_VXFLEXOS_USER" - // EnvPassword is the name of the enviroment variable used to set the + // EnvPassword is the name of the environment variable used to set the // user's password when authenticating to the ScaleIO Gateway + /* #nosec G101 */ EnvPassword = "X_CSI_VXFLEXOS_PASSWORD" - // EnvInsecure is the name of the enviroment variable used to specify + // EnvInsecure is the name of the environment variable used to specify // that the ScaleIO Gateway's certificate chain and host name should not // be verified EnvInsecure = "X_CSI_VXFLEXOS_INSECURE" - // EnvSystemName is the name of the enviroment variable used to set the + // EnvSystemName is the name of the environment variable used to set the // name of the ScaleIO system to interact with EnvSystemName = "X_CSI_VXFLEXOS_SYSTEMNAME" - // EnvSDCGUID is the name of the enviroment variable used to set the + // EnvSDCGUID is the name of the environment variable used to set the // GUID of the SDC. 
This is only used by the Node Service, and removes // a need for calling an external binary to retrieve the GUID EnvSDCGUID = "X_CSI_VXFLEXOS_SDCGUID" - // EnvThick is the name of the enviroment variable used to specify + // EnvThick is the name of the environment variable used to specify // that thick provisioning should be used when creating volumes EnvThick = "X_CSI_VXFLEXOS_THICKPROVISIONING" diff --git a/service/features/authorization_failure.json b/service/features/authorization_failure.json new file mode 100644 index 00000000..5d287e5f --- /dev/null +++ b/service/features/authorization_failure.json @@ -0,0 +1,6 @@ +{ + "message": "Unauthorized", + "httpStatusCode": 401, + "errorCode": 0 +} + diff --git a/service/features/get_system_instances.json b/service/features/get_system_instances.json index 48ab026f..7035d3a6 100644 --- a/service/features/get_system_instances.json +++ b/service/features/get_system_instances.json @@ -2,6 +2,7 @@ { "perfProfile": "Default", "installId": "1c078b073d75512c", + "name": "mocksystem", "systemVersionName": "DellEMC ScaleIO Version: R2_6.0.133", "capacityAlertHighThresholdPercent": 80, "capacityAlertCriticalThresholdPercent": 90, diff --git a/service/features/node_publish_unpublish.feature b/service/features/node_publish_unpublish.feature index 9dac919f..51381a02 100644 --- a/service/features/node_publish_unpublish.feature +++ b/service/features/node_publish_unpublish.feature @@ -13,12 +13,12 @@ Feature: VxFlex OS CSI interface Then the error contains Examples: - | voltype | access | fstype | errormsg | - | "mount" | "single-writer" | "xfs" | "none" | - | "mount" | "single-writer" | "ext4" | "none" | - | "mount" | "multiple-writer" | "ext4" | "Invalid access mode" | - | "block" | "single-writer" | "none" | "none" | - | "block" | "multiple-writer" | "none" | "none" | + | voltype | access | fstype | errormsg | + | "mount" | "single-writer" | "xfs" | "none" | + | "mount" | "single-writer" | "ext4" | "none" | + | "mount" | 
"multiple-writer" | "ext4" | "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER" | + | "block" | "single-writer" | "none" | "none" | + | "block" | "multiple-writer" | "none" | "none" | Scenario Outline: Node publish block volumes various induced error use cases from examples Given a VxFlexOS service @@ -32,9 +32,10 @@ Feature: VxFlex OS CSI interface Examples: | error | errormsg | + | "NodePublishBlockTargetNotFile" | "existing path is a directory" | | "GOFSMockBindMountError" | "none" | - | "GOFSMockMountError" | "failure bind-mounting block device to private mount" | - | "GOFSMockGetMountsError" | "could not reliably determine existing mount status" | + | "GOFSMockMountError" | "error bind mounting to target path" | + | "GOFSMockGetMountsError" | "Could not getDevMounts" | | "NoSymlinkForNodePublish" | "not published to node" | # may be different for Windows vs. Linux | "NoBlockDevForNodePublish" | "is not a block device@@not published to node" | @@ -42,11 +43,11 @@ Feature: VxFlex OS CSI interface # may be different for Windows vs. 
Linux | "PrivateDirectoryNotExistForNodePublish"| "cannot find the path specified@@no such file or directory"| | "BlockMkfilePrivateDirectoryNodePublish"| "existing path is not a directory" | - | "NodePublishNoTargetPath" | "target path required" | + | "NodePublishNoTargetPath" | "target path required" | | "NodePublishNoVolumeCapability" | "volume capability required" | - | "NodePublishNoAccessMode" | "volume access mode required" | - | "NodePublishNoAccessType" | "volume access type required" | - | "NodePublishBlockTargetNotFile" | "wrong type (file vs dir)" | + | "NodePublishNoAccessMode" | "Volume Access Mode is required" | + | "NodePublishNoAccessType" | "Volume Access Type is required" | + | "NodePublishBadTargetPath" | "cannot find the path specified@@no such file or directory"| Scenario Outline: Node publish mount volumes various induced error use cases from examples Given a VxFlexOS service @@ -54,28 +55,32 @@ Feature: VxFlex OS CSI interface And a capability with voltype "mount" access "single-writer" fstype "xfs" And get Node Publish Volume Request And I induce error + And I induce error When I call Probe When I call NodePublishVolume "SDC_GUID" Then the error contains Examples: - | error | errormsg | - | "GOFSMockDevMountsError" | "none" | - | "GOFSMockMountError" | "mount induced error" | - | "GOFSMockGetMountsError" | "could not reliably determine existing mount status" | - | "NoSymlinkForNodePublish" | "not published to node" | + | error | errorb | errormsg | + | "GOFSMockDevMountsError" | "none" | "none" | + | "GOFSMockMountError" | "none" | "mount induced error" | + | "GOFSMockGetMountsError" | "none" | "could not reliably determine existing mount status" | + | "NoSymlinkForNodePublish" | "none" | "not published to node" | # may be different for Windows vs. 
Linux - | "NoBlockDevForNodePublish" | "is not a block device@@not published to node" | - | "TargetNotCreatedForNodePublish" | "none" | + | "NoBlockDevForNodePublish" | "none" | "is not a block device@@not published to node" | + | "TargetNotCreatedForNodePublish" | "none" | "none" | # may be different for Windows vs. Linux - | "PrivateDirectoryNotExistForNodePublish"| "cannot find the path specified@@no such file or directory" | - | "BlockMkfilePrivateDirectoryNodePublish"| "existing path is not a directory" | - | "NodePublishNoTargetPath" | "target path required" | - | "NodePublishNoVolumeCapability" | "volume capability required" | - | "NodePublishNoAccessMode" | "volume access mode required" | - | "NodePublishNoAccessType" | "volume access type required" | - | "NodePublishFileTargetNotDir" | "wrong type (file vs dir)" | - + | "PrivateDirectoryNotExistForNodePublish"| "none" | "cannot find the path specified@@no such file or directory" | + | "BlockMkfilePrivateDirectoryNodePublish"| "none" | "existing path is not a directory" | + | "NodePublishNoTargetPath" | "none" | "target path required" | + | "NodePublishNoVolumeCapability" | "none" | "volume capability required" | + | "NodePublishNoAccessMode" | "none" | "Volume Access Mode is required" | + | "NodePublishNoAccessType" | "none" | "Volume Access Type is required" | + | "NodePublishFileTargetNotDir" | "none" | "existing path is not a directory" | + | "NodePublishPrivateTargetAlreadyCreated"| "none" | "not published to node" | + | "NodePublishPrivateTargetAlreadyMounted"| "none" | "Mount point already in use by device@@none" | + | "NodePublishPrivateTargetAlreadyMounted"| "GOFSMockGetMountsError" | "could not reliably determine existing mount status" | + | "NodePublishBadTargetPath" | "none" | "cannot find the path specified@@no such file or directory"| Scenario Outline: Node publish various use cases from examples when volume already published Given a VxFlexOS service @@ -83,17 +88,18 @@ Feature: VxFlex OS CSI 
interface And a capability with voltype access fstype When I call Probe And I call NodePublishVolume "SDC_GUID" + And I induce error "NodePublishPathAltDataDir" And I call NodePublishVolume "SDC_GUID" Then the error contains Examples: - | voltype | access | fstype | errormsg | - | "block" | "single-writer" | "none" | "access mode conflicts with existing mounts" | - | "mount" | "single-writer" | "xfs" | "access mode conflicts with existing mounts" | - | "mount" | "single-writer" | "ext4" | "access mode conflicts with existing mounts" | - | "mount" | "multiple-writer" | "ext4" | "Invalid access mode" | -# The following line seems like the wrong behavior; shouldn't this be allowed? - | "block" | "multiple-reader" | "none" | "access mode conflicts with existing mounts" | + | voltype | access | fstype | errormsg | + | "block" | "single-writer" | "none" | "Access mode conflicts with existing mounts" | + | "block" | "multiple-writer" | "none" | "none" | + | "mount" | "single-writer" | "xfs" | "Access mode conflicts with existing mounts" | + | "mount" | "single-writer" | "ext4" | "Access mode conflicts with existing mounts" | + | "mount" | "multiple-writer" | "ext4" | "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER" | + | "block" | "multiple-reader" | "none" | "none" | Scenario Outline: Node publish various use cases from examples when read-only mount volume already published Given a VxFlexOS service @@ -107,15 +113,15 @@ Feature: VxFlex OS CSI interface Then the error contains Examples: - | voltype | access | fstype | errormsg | - | "block" | "multiple-reader" | "none" | "read only not supported for Block Volume" | - | "mount" | "single-reader" | "none" | "none" | - | "mount" | "single-reader" | "xfs" | "none" | - | "mount" | "multiple-reader" | "ext4" | "none" | - | "mount" | "single-writer" | "ext4" | "access mode conflicts with existing mounts" | - | "mount" | "multiple-writer" | "ext4" | "Invalid access mode" | - - Scenario Outline: Node publish 
various use cases from examples when read-only mount volume already published + | voltype | access | fstype | errormsg | + | "block" | "multiple-reader" | "none" | "read only not supported for Block Volume" | + | "mount" | "single-reader" | "none" | "none" | + | "mount" | "single-reader" | "xfs" | "none" | + | "mount" | "multiple-reader" | "ext4" | "none" | + | "mount" | "single-writer" | "ext4" | "Access mode conflicts with existing mounts" | + | "mount" | "multiple-writer" | "ext4" | "do not support AccessMode MULTI_NODE_MULTI_WRITER" | + + Scenario Outline: Node publish various use cases from examples when read-only mount volume already published and I change the target path Given a VxFlexOS service And a controller published volume And a capability with voltype access fstype @@ -128,14 +134,13 @@ Feature: VxFlex OS CSI interface Then the error contains Examples: - | voltype | access | fstype | errormsg | - | "mount" | "single-reader" | "none" | "none" | - | "mount" | "single-reader" | "xfs" | "none" | - | "block" | "multiple-reader" | "none" | "read only not supported for Block Volume" | - | "mount" | "multiple-reader" | "ext4" | "none" | - | "mount" | "single-writer" | "ext4" | "access mode conflicts with existing mounts" | - | "mount" | "multiple-writer" | "ext4" | "Invalid access mode" | - + | voltype | access | fstype | errormsg | + | "mount" | "single-reader" | "none" | "none" | + | "mount" | "single-reader" | "xfs" | "none" | + | "block" | "multiple-reader" | "none" | "read only not supported for Block Volume" | + | "mount" | "multiple-reader" | "ext4" | "none" | + | "mount" | "single-writer" | "ext4" | "Access mode conflicts with existing mounts" | + | "mount" | "multiple-writer" | "ext4" | "do not support AccessMode MULTI_NODE_MULTI_WRITER" | Scenario: Node publish volume with volume context Given a VxFlexOS service @@ -178,7 +183,7 @@ Feature: VxFlex OS CSI interface | error | errormsg | | "NodeUnpublishBadVolume" | "none" | | "GOFSMockGetMountsError" 
| "could not reliably determine existing mount status" | - | "NodeUnpublishNoTargetPath" | "target path required" | + | "NodeUnpublishNoTargetPath" | "target path argument is required" | | "GOFSMockUnmountError" | "Error unmounting target" | | "PrivateDirectoryNotExistForNodePublish"| "none" | diff --git a/service/features/service.feature b/service/features/service.feature index 23572347..f2f235b6 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -17,7 +17,6 @@ Feature: VxFlex OS CSI interface When I call Probe Then a valid ProbeResponse is returned -@wip Scenario: Identity Probe call no controller connection Given a VxFlexOS service And the Controller has no connection @@ -57,6 +56,12 @@ Examples: When I call Probe Then the possible error contains "unable to get SDC GUID" + Scenario: Identity Probe call node probe drvCfg error + Given a VxFlexOS service + And there is a Node Probe drvCfg error + When I call Probe + Then the possible error contains "unable to get System Name via config or drv_cfg binary" + Scenario Outline: Create volume good scenario Given a VxFlexOS service When I call Probe @@ -139,12 +144,19 @@ Examples: And I call CreateVolume "volume4" Then the error contains "Couldn't find storage pool" - Scenario: Create volume with Accessibility Requirements + Scenario Outline: Create volume with Accessibility Requirements Given a VxFlexOS service When I call Probe - And I specify AccessibilityRequirements + And I specify AccessibilityRequirements with a SystemID of And I call CreateVolume "accessibility" - Then the error contains "AccessibilityRequirements is not currently supported" + Then the error contains + + Examples: + | sysID | errormsg | + | "f.service.opt.SystemName" | "none" | + | "" | "unknown to this controller" | + | "Unknown" | "unknown to this controller" | + | "badSystem" | "unknown to this controller" | Scenario: Create volume with VolumeContentSource Given a VxFlexOS service @@ -301,6 +313,7 @@ 
Examples: When I call NodeUnstageVolume Then the error contains "Unimplemented" + Scenario: Call NodeGetCapabilities should return a valid response Given a VxFlexOS service And I call Probe @@ -337,7 +350,7 @@ Examples: And a valid CreateVolumeResponse is returned And I induce error "WrongVolIDError" And I call CreateSnapshot "snap1" - Then the error contains "Failed to create snapshot" + Then the error contains "Failed to create snapshot" Scenario: Snapshot a single block volume but receive error Given a VxFlexOS service @@ -507,6 +520,7 @@ Examples: When I call Probe And I call Create Volume from Snapshot Then the error contains "Failed to create snapshot" + Scenario: Idempotent create a volume from a snapshot Given a VxFlexOS service @@ -519,15 +533,49 @@ Examples: And no error was received And a valid CreateVolumeResponse is returned - Scenario: Call ControllerExpandVolume, should get unimplemented - Given a VxFlexOS service - When I call ControllerExpandVolume - Then the error contains "Unimplemented" - Scenario: Call NodeExpandVolume, should get unimplemented + Scenario Outline: Call ControllerExpandVolume Given a VxFlexOS service - When I call NodeExpandVolume - Then the error contains "Unimplemented" + And I call Probe + And I call CreateVolumeSize "volume10" "32" + And a valid CreateVolumeResponse is returned + And I induce error + Then I call ControllerExpandVolume set to + And the error contains + And I call ControllerExpandVolume set to + Then the error contains + + Examples: + | error | GB | errmsg | + | "none" | 32 | "none" | + | "SetVolumeSizeError" | 64 | "induced error" | + | "none" | 16 | "none" | + | "NoVolumeIDError" | 64 | "Volume ID is required" | + | "none" | 64 | "none" | + | "GetVolByIDError" | 64 | "induced error" | + + Scenario Outline: Call NodeExpandVolume + Given a VxFlexOS service + And I call Probe + And I call CreateVolumeSize "volume4" "32" + And a controller published volume + And a capability with voltype "mount" access 
"single-writer" fstype "xfs" + And get Node Publish Volume Request + And I call NodePublishVolume "SDC_GUID" + And no error was received + And I induce error + When I call NodeExpandVolume with volumePath as + Then the error contains + + Examples: + | error | volPath | errormsg | + | "none" | "" | "Volume path required" | + | "none" | "test/tmp/datadir" | "none" | + | "GOFSInduceFSTypeError" | "test/tmp/datadir" | "Failed to fetch filesystem" | + | "GOFSInduceResizeFSError" | "test/tmp/datadir" | "Failed to resize device" | + | "NoVolumeIDError" | "test/tmp/datadir" | "Volume ID is required" | + | "none" | "not/a/path/1234" | "Could not stat volume path" | + | "none" | "test/tmp/datafile" | "none" | Scenario: Call NodeGetVolumeStats, should get unimplemented Given a VxFlexOS service @@ -549,7 +597,6 @@ Examples: When i Call getStoragePoolnameByID "123" Then the error contains "cannot find storage pool" -@wip Scenario: Test BeforeServe Given a VxFlexOS service And I invalidate the Probe cache @@ -557,6 +604,26 @@ Examples: # Get different error message on Windows vs. 
Linux Then the error contains "Unable to initialize cert pool from system@@unable to login to VxFlexOS Gateway@@unable to get SDC GUID" - - - + Scenario: Call Node getAllSystems + Given a VxFlexOS service + And I do not have a gateway connection + When I Call nodeGetAllSystems + + Scenario: Call Node getAllSystems + Given a VxFlexOS service + And I do not have a gateway connection + And I do not have a valid gateway endpoint + When I Call nodeGetAllSystems + Then the error contains "Unable to create ScaleIO client" + + Scenario: Call Node getAllSystems + Given a VxFlexOS service + And I do not have a gateway connection + And I do not have a valid gateway password + When I Call nodeGetAllSystems + Then the error contains "Unable to create ScaleIO client" + + Scenario: Call evalsymlinks + Given a VxFlexOS service + When I call evalsymlink "invalidpath" + Then the error contains "Could not evaluate symlinks for path" diff --git a/service/features/volume.json.template b/service/features/volume.json.template index 8dad6000..a00d7cd8 100644 --- a/service/features/volume.json.template +++ b/service/features/volume.json.template @@ -1,5 +1,5 @@ { - "sizeInKb": 8388608, + "sizeInKb": 33554432, "vtreeId": "c36a59ef00000001", "storagePoolId": "e65f9c2700000000", "isObfuscated": false, diff --git a/service/identity.go b/service/identity.go index b7680e5a..f97613a8 100644 --- a/service/identity.go +++ b/service/identity.go @@ -40,6 +40,13 @@ func (s *service) GetPluginCapabilities( }, }, }, + { + Type: &csi.PluginCapability_VolumeExpansion_{ + VolumeExpansion: &csi.PluginCapability_VolumeExpansion{ + Type: csi.PluginCapability_VolumeExpansion_ONLINE, + }, + }, + }, } } return &rep, nil diff --git a/service/mount.go b/service/mount.go index d34829a7..3cdf2313 100644 --- a/service/mount.go +++ b/service/mount.go @@ -5,14 +5,15 @@ import ( "fmt" "os" "path/filepath" + "strings" + + "time" csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/dell/gofsutil" log 
"github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "os/exec" - "time" ) // Variables set only for unit testing. @@ -55,8 +56,8 @@ func GetDevice(path string) (*Device, error) { return &Device{ Name: fi.Name(), - FullPath: path, - RealDev: d, + FullPath: replaceBackslashWithSlash(path), + RealDev: replaceBackslashWithSlash(d), }, nil } @@ -85,12 +86,6 @@ func publishVolume( "volume capability required") } - accMode := volCap.GetAccessMode() - if accMode == nil { - return status.Error(codes.InvalidArgument, - "volume access mode required") - } - // make sure device is valid sysDevice, err := GetDevice(device) if err != nil { @@ -99,73 +94,37 @@ func publishVolume( id, err.Error()) } - isBlock := false - typeSet := false - - if blockVol := volCap.GetBlock(); blockVol != nil { - // Read-only is not supported for BlockVolume. Doing a read-only - // bind mount of the device to the target path does not prevent - // the underlying block device from being modified, so don't - // advertise a false sense of security - if ro { - return status.Error(codes.InvalidArgument, - "read only not supported for Block Volume") - } - - isBlock = true - typeSet = true + isBlock, mntVol, accMode, multiAccessFlag, err := validateVolumeCapability(volCap, ro) + if err != nil { + return err } - // make sure target is created - tgtStat, err := os.Stat(target) + // Make sure target is created. The spec says the driver is responsible + // for creating the target, but Kubernetes generallly creates the target. 
+ privTgt := getPrivateMountPoint(privDir, id) + err = createTarget(target, isBlock) if err != nil { - if os.IsNotExist(err) { - if err != nil { - - if isBlock { - _, err = mkfile(target) - if err != nil { - return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) - } - } else { - _, err = mkdir(target) - if err != nil { - return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) - } - } - - } else { - - return status.Errorf(codes.Internal, - "failed to stat target, err: %s", err.Error()) - } - } + // Unmount and remove the private directory for the retry so clean start next time. + // K8S probably removed part of the path. + cleanupPrivateTarget(reqID, privTgt) + return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) } - tgtStat, _ = os.Stat(target) // make sure privDir exists and is a directory if _, err := mkdir(privDir); err != nil { return err } - mntVol := volCap.GetMount() - if mntVol != nil { - typeSet = true - } - if !typeSet { - return status.Error(codes.InvalidArgument, - "volume access type required") + // Handle block as a short cut + if isBlock { + // BLOCK only + mntFlags := mntVol.GetMountFlags() + err = mountBlock(sysDevice, target, mntFlags, singleAccessMode(accMode)) + return err } // check that target is right type for vol type - - if !(tgtStat.IsDir() == !isBlock) { - return status.Errorf(codes.FailedPrecondition, - "target: %s wrong type (file vs dir) Access Type", target) - } - // Path to mount device to - privTgt := getPrivateMountPoint(privDir, id) f := log.Fields{ "id": id, @@ -191,17 +150,13 @@ func publishVolume( log.WithFields(f).Debug("attempting mount to private area") // Make sure private mount point exists - var created bool - if isBlock { - created, err = mkfile(privTgt) - } else { - created, err = mkdir(privTgt) - } + created, err := mkdir(privTgt) if err != nil { return 
status.Errorf(codes.Internal, "Unable to create private mount point: %s", err.Error()) } + alreadyMounted := false if !created { log.WithFields(f).Debug("private mount target already exists") @@ -222,27 +177,31 @@ func publishVolume( } for _, m := range mnts { if m.Path == privTgt { - log.WithFields(f).WithField("mountedDevice", m.Device).Error( - "mount point already in use by device") - return status.Error(codes.Internal, - "Unable to use private mount point") + log.Debug(fmt.Sprintf("MOUNT: %#v", m)) + resolvedMountDevice := evalSymlinks(m.Device) + if resolvedMountDevice != sysDevice.RealDev { + log.WithFields(f).WithField("mountedDevice", m.Device).Error( + "mount point already in use by device") + return status.Error(codes.Internal, + "Mount point already in use by device") + } + alreadyMounted = true } } } - if !isBlock { + if !alreadyMounted { fs := mntVol.GetFsType() mntFlags := mntVol.GetMountFlags() - + if fs == "xfs" { + mntFlags = append(mntFlags, "nouuid") + } if err := handlePrivFSMount( ctx, accMode, sysDevice, mntFlags, fs, privTgt); err != nil { + // K8S may have removed the desired mount point. Clean up the private target. 
+ cleanupPrivateTarget(reqID, privTgt) return err } - } else { - if err := gofsutil.BindMount(ctx, sysDevice.FullPath, privTgt); err != nil { - return status.Errorf(codes.Internal, - "failure bind-mounting block device to private mount: %s", err.Error()) - } } } else { @@ -250,30 +209,33 @@ func publishVolume( // mounted to the expected private mount, with correct rw/ro perms mounted := false for _, m := range devMnts { - if m.Path == privTgt { + if m.Path == target { + log.Printf("mount %#v already mounted to requested target %s", m, target) + } else if m.Path == privTgt { mounted = true - rwo := "rw" + rwo := multiAccessFlag if ro { rwo = "ro" } - if contains(m.Opts, rwo) { - log.WithFields(f).Debug( - "private mount already in place") - break + if rwo == "" || contains(m.Opts, rwo) { + log.WithFields(f).Debug("private mount already in place") } else { + log.WithFields(f).Printf("mount %#v rwo %s", m, rwo) return status.Error(codes.InvalidArgument, - "access mode conflicts with existing mounts") + "Access mode conflicts with existing mounts") } + } else if singleAccessMode(accMode) { + return status.Error(codes.FailedPrecondition, "Access mode conflicts with existing mounts") } } if !mounted { return status.Error(codes.Internal, - "device already in use and mounted elsewhere") + "Device already in use and mounted elsewhere") } } // Private mount in place, now bind mount to target path - devMnts, err = getDevMounts(sysDevice) + targetMnts, err := getPathMounts(target) if err != nil { return status.Errorf(codes.Internal, "could not reliably determine existing mount status: %s", @@ -282,19 +244,19 @@ func publishVolume( // If mounts already existed for this device, check if mount to // target path was already there - if len(devMnts) > 0 { - for _, m := range devMnts { + if len(targetMnts) > 0 { + for _, m := range targetMnts { if m.Path == target { // volume already published to target // if mount options look good, do nothing - rwo := "rw" - if accMode.GetMode() == 
csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY || accMode.GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + rwo := multiAccessFlag + if ro { rwo = "ro" } - if !contains(m.Opts, rwo) { + if rwo != "" && !contains(m.Opts, rwo) { + log.WithFields(f).Printf("mount %#v rwo %s\n", m, rwo) return status.Error(codes.Internal, "volume previously published with different options") - } // Existing mount satisfies request log.WithFields(f).Debug("volume already published to target") @@ -304,16 +266,29 @@ func publishVolume( } + // Recheck that target is created. k8s has this awful habit of deleting the target if it times out the request. + // This will narrow the window. + err = createTarget(target, isBlock) + if err != nil { + // Unmount and remove the private directory for the retry so clean start next time. + // K8S probably removed part of the path. + cleanupPrivateTarget(reqID, privTgt) + return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) + } + var mntFlags []string - if isBlock { - mntFlags = make([]string, 0) - } else { - mntFlags = mntVol.GetMountFlags() - if accMode.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY || accMode.GetMode() == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { - mntFlags = append(mntFlags, "ro") - } + mntFlags = mntVol.GetMountFlags() + if mntVol.FsType == "xfs" { + mntFlags = append(mntFlags, "nouuid") } + if ro || accMode.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY { + mntFlags = append(mntFlags, "ro") + } + if err := gofsutil.BindMount(ctx, privTgt, target, mntFlags...); err != nil { + // Unmount and remove the private directory for the retry so clean start next time. + // K8S probably removed part of the path. 
+ cleanupPrivateTarget(reqID, privTgt) return status.Errorf(codes.Internal, "error publish volume to target path: %s", err.Error()) @@ -370,13 +345,19 @@ func contains(list []string, item string) bool { func mkfile(path string) (bool, error) { st, err := os.Stat(path) if os.IsNotExist(err) { + /* #nosec G302 */ file, err := os.OpenFile(path, os.O_CREATE, 0755) if err != nil { log.WithField("dir", path).WithError( err).Error("Unable to create dir") return false, err } - file.Close() + err = file.Close() + if err != nil { + // Log the error but keep going + log.WithField("file", path).WithError( + err).Error("Unable to close file") + } log.WithField("path", path).Debug("created file") return true, nil } @@ -391,6 +372,7 @@ func mkfile(path string) (bool, error) { func mkdir(path string) (bool, error) { st, err := os.Stat(path) if os.IsNotExist(err) { + /* #nosec G301 */ if err := os.Mkdir(path, 0755); err != nil { log.WithField("dir", path).WithError( err).Error("Unable to create dir") @@ -450,7 +432,7 @@ func unpublishVolume( tgtMnt := false privMnt := false for _, m := range mnts { - if m.Source == sysDevice.RealDev || m.Device == sysDevice.RealDev { + if m.Source == sysDevice.RealDev || m.Device == sysDevice.RealDev || m.Device == sysDevice.FullPath { if m.Path == privTgt { privMnt = true } else if m.Path == target { @@ -495,7 +477,9 @@ func unmountPrivMount( } log.WithField("directory", target).Debug( "removing directory") - os.Remove(target) + if err := os.Remove(target); err != nil { + log.Errorf("Unable to remove directory: %v", err) + } } return nil } @@ -518,18 +502,37 @@ func getDevMounts( return devMnts, nil } +// For Windows testing, replace any paths with \\ to have / +func replaceBackslashWithSlash(input string) string { + return strings.Replace(input, "\\", "/", -1) +} + +// getPathMounts finds all the mounts for a given path. 
+func getPathMounts(path string) ([]gofsutil.Info, error) { + ctx := context.Background() + devMnts := make([]gofsutil.Info, 0) + + mnts, err := gofsutil.GetMounts(ctx) + if err != nil { + return devMnts, err + } + for _, m := range mnts { + if m.Path == path { + devMnts = append(devMnts, m) + } + } + return devMnts, nil +} + func removeWithRetry(target string) error { var err error for i := 0; i < 3; i++ { err = os.Remove(target) if err != nil && !os.IsNotExist(err) { log.Error("error removing private mount target: " + err.Error()) - cmd := exec.Command("/usr/bin/rmdir", target) - textBytes, err := cmd.CombinedOutput() + err = os.RemoveAll(target) if err != nil { - log.Error("error calling rmdir: " + err.Error()) - } else { - log.Printf("rmdir output: %s", string(textBytes)) + log.Errorf("Error removing directory: %v", err.Error()) } time.Sleep(3 * time.Second) } else { @@ -539,3 +542,136 @@ func removeWithRetry(target string) error { } return err } + +// Evaulate symlinks to a resolution. In case of an error, +// logs the error but returns the original path. 
+func evalSymlinks(path string) string { + // eval any symlinks and make sure it points to a device + d, err := filepath.EvalSymlinks(path) + if err != nil { + log.Error("Could not evaluate symlinks for path: " + path) + return path + } + return d +} + +// Given a volume capability, validates it and returns: +// boolean isBlock -- the capability is for a block device +// csi.VolumeCapability_MountVolume - contains FsType and MountFlags +// csi.VolumeCapability_AccessMode accMode gives the access mode +// string multiAccessFlag - "rw" or "ro" or "" as appropriate +// error +func validateVolumeCapability(volCap *csi.VolumeCapability, readOnly bool) (bool, *csi.VolumeCapability_MountVolume, *csi.VolumeCapability_AccessMode, string, error) { + var mntVol *csi.VolumeCapability_MountVolume + isBlock := false + isMount := false + multiAccessFlag := "" + accMode := volCap.GetAccessMode() + if accMode == nil { + return false, mntVol, nil, "", status.Error(codes.InvalidArgument, "Volume Access Mode is required") + } + if blockVol := volCap.GetBlock(); blockVol != nil { + isBlock = true + switch accMode.GetMode() { + case csi.VolumeCapability_AccessMode_UNKNOWN: + return true, mntVol, accMode, "", status.Error(codes.InvalidArgument, "Unknown Access Mode") + case csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER: + case csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY: + case csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY: + multiAccessFlag = "ro" + case csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER: + case csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER: + multiAccessFlag = "rw" + } + if readOnly { + return true, mntVol, accMode, "", status.Error(codes.InvalidArgument, "read only not supported for Block Volume") + } + } + mntVol = volCap.GetMount() + if mntVol != nil { + isMount = true + switch accMode.GetMode() { + case csi.VolumeCapability_AccessMode_UNKNOWN: + return false, mntVol, accMode, "", status.Error(codes.InvalidArgument, "Unknown 
Access Mode") + case csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER: + case csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY: + case csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY: + multiAccessFlag = "ro" + case csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER: + case csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER: + return false, mntVol, accMode, "", status.Error(codes.AlreadyExists, "Mount volumes do not support AccessMode MULTI_NODE_MULTI_WRITER") + } + } + + if !isBlock && !isMount { + return false, mntVol, accMode, "", status.Error(codes.InvalidArgument, "Volume Access Type is required") + } + return isBlock, mntVol, accMode, multiAccessFlag, nil +} + +// singleAccessMode returns true if only a single access is allowed SINGLE_NODE_WRITER or SINGLE_NODE_READER_ONLY +func singleAccessMode(accMode *csi.VolumeCapability_AccessMode) bool { + switch accMode.GetMode() { + case csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER: + return true + case csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY: + return true + } + return false +} + +func createTarget(target string, isBlock bool) error { + var err error + // Make sure target is created. The spec says the driver is responsible + // for creating the target, but Kubernetes generallly creates the target. + if isBlock { + _, err = mkfile(target) + if err != nil { + return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) + } + } else { + _, err = mkdir(target) + if err != nil { + return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) + } + } + return nil +} + +// cleanupPrivateTarget unmounts and removes the private directory for the retry so clean start next time. 
+func cleanupPrivateTarget(reqID, privTgt string) { + log.WithField("CSIRequestID", reqID).WithField("privTgt", privTgt).Info("Cleaning up private target") + if privErr := gofsutil.Unmount(context.Background(), privTgt); privErr != nil { + log.WithField("CSIRequestID", reqID).Printf("Error unmounting privTgt %s: %s", privTgt, privErr) + } + if privErr := removeWithRetry(privTgt); privErr != nil { + log.WithField("CSIRequestID", reqID).Printf("Error removing privTgt %s: %s", privTgt, privErr) + } +} + +// mountBlock bind mounts the device to the required target +func mountBlock(device *Device, target string, mntFlags []string, singleAccess bool) error { + log.Printf("mountBlock called device %#v target %s mntFlags %#v", device, target, mntFlags) + // Check to see if already mounted + mnts, err := getDevMounts(device) + if err != nil { + return status.Errorf(codes.Internal, "Could not getDevMounts for: %s", device.RealDev) + } + for _, mnt := range mnts { + if mnt.Path == target { + log.Info("Block volume target is already mounted") + return nil + } else if singleAccess { + return status.Error(codes.InvalidArgument, "Access mode conflicts with existing mounts") + } + } + err = createTarget(target, true) + if err != nil { + return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create %s: %s", target, err.Error())) + } + err = gofsutil.BindMount(context.Background(), device.RealDev, target, mntFlags...) 
+ if err != nil { + return status.Errorf(codes.Internal, "error bind mounting to target path: %s", target) + } + return nil +} diff --git a/service/node.go b/service/node.go index 10d4fd62..656028dc 100644 --- a/service/node.go +++ b/service/node.go @@ -3,12 +3,14 @@ package service import ( "bufio" "bytes" + "fmt" "os" "os/exec" "strings" "time" csi "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/dell/gofsutil" "github.com/dell/goscaleio" log "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -22,7 +24,8 @@ const ( ) var ( - getMappedVolMaxRetry = 20 + getMappedVolMaxRetry = 30 + connectedSystemID = make([]string, 0) ) func (s *service) NodeStageVolume( @@ -65,21 +68,9 @@ func (s *service) NodePublishVolume( id := req.GetVolumeId() log.Printf("NodePublishVolume id: %s", id) - // If not found immediately, give a little time for controller to - // communicate with SDC that it has volume - var sdcMappedVol *goscaleio.SdcMappedVolume - var err error - for i := 0; i < getMappedVolMaxRetry; i++ { - sdcMappedVol, err = getMappedVol(id) - if sdcMappedVol != nil { - break - } - log.Printf("Node publish getMappedVol retry: %d", i) - time.Sleep(1 * time.Second) - } + sdcMappedVol, err := s.getSDCMappedVol(id) if err != nil { - log.Printf("NodePublishVolume returning not published to node: %s", id) - return nil, err + return nil, status.Error(codes.InvalidArgument, err.Error()) } if err := publishVolume(req, s.privDir, sdcMappedVol.SdcDevice, reqID); err != nil { @@ -102,12 +93,20 @@ func (s *service) NodeUnpublishVolume( } } + targetPath := req.GetTargetPath() + if targetPath == "" { + return nil, status.Error(codes.InvalidArgument, "A target path argument is required") + } + s.logStatistics() id := req.GetVolumeId() log.Printf("NodeUnublishVolume id: %s", id) - sdcMappedVol, err := getMappedVol(id) + sdcMappedVol, err := s.getSDCMappedVol(id) if err != nil { + // fix k8s 19 bug: ControllerUnpublishVolume is called before NodeUnpublishVolume + 
_ = gofsutil.Unmount(ctx, targetPath) + // Idempotent need to return ok if not published return &csi.NodeUnpublishVolumeResponse{}, nil } @@ -116,15 +115,35 @@ func (s *service) NodeUnpublishVolume( return nil, err } - privTgt := req.GetTargetPath() - if privTgt == "" { - return nil, status.Error(codes.InvalidArgument, "A Staging Target argument is required") + _ = gofsutil.Unmount(ctx, targetPath) + + if err := removeWithRetry(targetPath); err != nil { + log.Errorf("Unable to remove target path: %v", err) } - removeWithRetry(privTgt) return &csi.NodeUnpublishVolumeResponse{}, nil } +func (s *service) getSDCMappedVol(volumeID string) (*goscaleio.SdcMappedVolume, error) { + // If not found immediately, give a little time for controller to + // communicate with SDC that it has volume + var sdcMappedVol *goscaleio.SdcMappedVolume + var err error + for i := 0; i < getMappedVolMaxRetry; i++ { + sdcMappedVol, err = getMappedVol(volumeID) + if sdcMappedVol != nil { + break + } + log.Printf("Node publish getMappedVol retry: %d", i) + time.Sleep(1 * time.Second) + } + if err != nil { + log.Printf("SDC returned volume %s not published to node", volumeID) + return nil, err + } + return sdcMappedVol, err +} + // Get the volumes published to the SDC (given by SdcMappedVolume) and scan for requested vol id func getMappedVol(id string) (*goscaleio.SdcMappedVolume, error) { // get source path of volume/device @@ -148,8 +167,67 @@ func getMappedVol(id string) (*goscaleio.SdcMappedVolume, error) { return sdcMappedVol, nil } +func (s *service) getAllSystems(ctx context.Context, systems []string) error { + // Create our ScaleIO API client, if needed + if s.adminClient == nil { + // create a new client + c, err := goscaleio.NewClientWithArgs( + s.opts.Endpoint, "", s.opts.Insecure, !s.opts.DisableCerts) + if err != nil { + e := fmt.Errorf("Unable to create ScaleIO client: %s", err.Error()) + log.Error(e) + return e + } + // authenticate to this client + _, err = 
c.Authenticate(&goscaleio.ConfigConnect{ + Endpoint: s.opts.Endpoint, + Username: s.opts.User, + Password: s.opts.Password, + }) + if err != nil { + e := fmt.Errorf("Unable to create ScaleIO client: %s", err.Error()) + log.Error(e) + return e + } + // success! Save the client for later use + s.adminClient = c + } + + // get the systemNames for all of the systemIDs in connectedSystemID + if s.adminClient != nil { + connectedSystemName := make([]string, 0) + for _, i := range systems { + sys, err := s.adminClient.FindSystem(i, i, "") + if err != nil { + // could not find the name for this system. Log a message and keep going + e := fmt.Errorf("Unable to find VxFlex OS system name matching system ID: %s. Error is %v", i, err) + log.Error(e) + } else { + if sys.System == nil || sys.System.Name == "" { + // system does not have a name, this is fine + log.Printf("Found system without a name, system ID: %s", i) + } else { + log.Printf("Found system Name: %s", sys.System.Name) + connectedSystemName = append(connectedSystemName, sys.System.Name) + } + } + } + for _, n := range connectedSystemName { + connectedSystemID = append(connectedSystemID, n) + } + } + return nil +} + func (s *service) nodeProbe(ctx context.Context) error { + // make sure the kernel module is loaded + if !kmodLoaded(s.opts) { + return status.Error(codes.FailedPrecondition, + "scini kernel module not loaded") + } + + // fetch the SDC GUID if s.opts.SdcGUID == "" { // try to get GUID using `drv_cfg` binary if _, err := os.Stat(drvCfg); os.IsNotExist(err) { @@ -167,11 +245,18 @@ func (s *service) nodeProbe(ctx context.Context) error { log.WithField("guid", s.opts.SdcGUID).Info("set SDC GUID") } - if !kmodLoaded(s.opts) { - return status.Error(codes.FailedPrecondition, - "scini kernel module not loaded") + // fetch the systemIDs + var err error + connectedSystemID, err = getSystemsKnownToSDC(s.opts) + if err != nil { + return status.Errorf(codes.FailedPrecondition, "%s", err) } + // get all the system 
names and IDs. + // ignore the errors here as all the information is supplementary + /* #nosec G104 */ + s.getAllSystems(ctx, connectedSystemID) + // make sure privDir is pre-created if _, err := mkdir(s.privDir); err != nil { return status.Errorf(codes.Internal, @@ -182,6 +267,22 @@ func (s *service) nodeProbe(ctx context.Context) error { return nil } +// getStringInBetween returns empty string if no start or end string found +func getStringInBetween(str string, start string, end string) (result string) { + s := strings.Index(str, start) + if s == -1 { + return + } + s += len(start) + e := strings.Index(str[s:], end) + if e == -1 { + return + } + + contents := str[s : s+e] + return strings.TrimSpace(contents) +} + func kmodLoaded(opts Opts) bool { // opts.Lsmod is introduced solely for unit testing. var out []byte @@ -210,16 +311,66 @@ func kmodLoaded(opts Opts) bool { return false } +func getSystemsKnownToSDC(opts Opts) ([]string, error) { + var out []byte + var err error + systems := make([]string, 0) + + // fetch the systemIDs + if opts.drvCfgQueryMDM == "" { + // try to get system name using `drv_cfg` binary + if _, err := os.Stat(drvCfg); os.IsNotExist(err) { + return systems, status.Error(codes.FailedPrecondition, + "unable to get System Name via config or drv_cfg binary") + } + + out, err = exec.Command(drvCfg, "--query_mdms").CombinedOutput() + if err != nil { + return systems, status.Errorf(codes.FailedPrecondition, + "error getting System ID: %s", err.Error()) + } + } else { + out = []byte(opts.drvCfgQueryMDM) + } + + r := bytes.NewReader(out) + s := bufio.NewScanner(r) + + for s.Scan() { + // the System ID is the value of the "MDM-ID" field in the drv_cfg output + sysID := getStringInBetween(s.Text(), "MDM-ID", "SDC") + if sysID != "" { + systems = append(systems, sysID) + log.WithField("ID", sysID).Info("Found connected system") + } + } + + return systems, nil +} + func (s *service) NodeGetCapabilities( ctx context.Context, req *csi.NodeGetCapabilitiesRequest) ( 
*csi.NodeGetCapabilitiesResponse, error) { - return &csi.NodeGetCapabilitiesResponse{}, nil + return &csi.NodeGetCapabilitiesResponse{ + Capabilities: []*csi.NodeServiceCapability{ + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }, + }, + }, nil + } -// Minimal version of NodeGetInfo. Returns NodeId -// MaxVolumesPerNode (optional) is left as 0 which means unlimited, and AccessibleTopology is left nil. +// NodeGetInfo returns Node information +// NodeId is the identifier of the node and will match the SDC GUID +// MaxVolumesPerNode (optional) is left as 0 which means unlimited +// AccessibleTopology will be set with the VxFlex OS SystemID func (s *service) NodeGetInfo( ctx context.Context, req *csi.NodeGetInfoRequest) ( @@ -236,7 +387,32 @@ func (s *service) NodeGetInfo( return nil, err } } - return &csi.NodeGetInfoResponse{NodeId: s.opts.SdcGUID}, nil + + // Get the Node ID + if len(connectedSystemID) == 0 { + if !s.opts.AutoProbe { + return nil, status.Error(codes.FailedPrecondition, + "Unable to get Node ID. 
Either it is not configured, "+ + "or Node Service has not been probed") + } + if err := s.nodeProbe(ctx); err != nil { + return nil, err + } + } + + // Create the topology keys + // csi-vxflexos.dellemc.com/: + topology := map[string]string{} + for _, sysID := range connectedSystemID { + topology[Name+"/"+sysID] = SystemTopologySystemValue + } + + return &csi.NodeGetInfoResponse{ + NodeId: s.opts.SdcGUID, + AccessibleTopology: &csi.Topology{ + Segments: topology, + }, + }, nil } func (s *service) NodeGetVolumeStats( @@ -246,5 +422,80 @@ func (s *service) NodeGetVolumeStats( } func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { - return nil, status.Error(codes.Unimplemented, "") + + var reqID string + var err error + headers, ok := metadata.FromIncomingContext(ctx) + if ok { + if req, ok := headers["csi.requestid"]; ok && len(req) > 0 { + reqID = req[0] + } + } + + err = s.nodeProbe(ctx) + if err != nil { + log.Error("nodeProbe failed with error :" + err.Error()) + return nil, err + } + + volumePath := req.GetVolumePath() + if volumePath == "" { + log.Error("Volume path required") + return nil, status.Error(codes.InvalidArgument, + "Volume path required") + } + + // Check if volume path is a directory. + // Mount type volumes are always mounted on a directory. + // If not a directory, assume it's a raw block device mount and return ok. 
+ volumePathInfo, err := os.Lstat(volumePath) + if err != nil { + return nil, status.Error(codes.InvalidArgument, "Could not stat volume path: "+volumePath) + } + if !volumePathInfo.Mode().IsDir() { + log.Infof("Volume path %s is not a directory- assuming a raw block device mount", volumePath) + return &csi.NodeExpandVolumeResponse{}, nil + } + + volID := req.GetVolumeId() + if volID == "" { + return nil, status.Error(codes.InvalidArgument, + "Volume ID is required") + } + + sdcMappedVolume, err := s.getSDCMappedVol(volID) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + log.Infof("sdcMappedVolume %+v", sdcMappedVolume) + sdcDevice := strings.Replace(sdcMappedVolume.SdcDevice, "/dev/", "", 1) + log.Infof("sdcDevice %s", sdcDevice) + devicePath := sdcMappedVolume.SdcDevice + + size := req.GetCapacityRange().GetRequiredBytes() + + f := log.Fields{ + "CSIRequestID": reqID, + "DevicePath": devicePath, + "VolumeID": volID, + "VolumePath": volumePath, + "Size": size, + } + log.WithFields(f).Info("resizing volume") + fsType, err := gofsutil.FindFSType(context.Background(), volumePath) + if err != nil { + log.Errorf("Failed to fetch filesystem type for mount (%s) with error (%s)", volumePath, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) + } + log.Infof("Found %s filesystem mounted on volume %s", fsType, volumePath) + + // Resize the filesystem + err = gofsutil.ResizeFS(context.Background(), volumePath, devicePath, "", fsType) + if err != nil { + log.Errorf("Failed to resize filesystem: mountpoint (%s) device (%s) with error (%s)", + volumePath, devicePath, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) + } + + return &csi.NodeExpandVolumeResponse{}, nil } diff --git a/service/service.go b/service/service.go index c7eb6961..9960cbf4 100644 --- a/service/service.go +++ b/service/service.go @@ -32,6 +32,9 @@ const ( thinProvisioned = "ThinProvisioned" thickProvisioned = "ThickProvisioned" 
defaultPrivDir = "/dev/disk/csi-vxflexos" + + // SystemTopologySystemValue is the supported topology key + SystemTopologySystemValue string = "csi-vxflexos.dellemc.com" ) // Manifest is the SP's manifest. @@ -62,6 +65,7 @@ type Opts struct { AutoProbe bool DisableCerts bool // used for unit testing only Lsmod string // used for unit testing only + drvCfgQueryMDM string // used for testing only EnableSnapshotCGDelete bool // when snapshot deleted, enable deleting of all snaps in the CG of the snapshot EnableListVolumesSnapshots bool // when listing volumes, include snapshots and volumes } @@ -113,11 +117,8 @@ func (s *service) BeforeServe( if s.opts.Password != "" { fields["password"] = "******" } - - //censor user for logging purposes - fields["user"] = "******" + log.WithFields(fields).Infof("configured %s", Name) - fields["user"] = s.opts.User }() // Get the SP's operating mode. diff --git a/service/service_test.go b/service/service_test.go index a395169d..b020a961 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -21,7 +21,7 @@ func TestMain(m *testing.M) { }, godog.Options{ Format: "pretty", Paths: []string{"features"}, - //Tags: "wip", + //Tags: "wip", }) fmt.Printf("godog finished\n") diff --git a/service/step_defs_test.go b/service/step_defs_test.go index c7cbfef3..d14faf80 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -32,11 +32,15 @@ const ( altNodeID = "7E012974-3651-4DCB-9954-25975A3C3CDF" datafile = "test/tmp/datafile" datadir = "test/tmp/datadir" + badtarget = "/nonexist/target" + altdatadir = "test/tmp/altdatadir" + altdatafile = "test/tmp/altdatafile" sdcVolume1 = "d0f055a700000000" sdcVolume2 = "d0f055aa00000001" sdcVolume0 = "0000000000000000" mdmID = "0000" nodePublishBlockDevicePath = "test/dev/scinia" + nodePublishAltBlockDevPath = "test/dev/scinib" nodePublishSymlinkDir = "test/dev/disk/by-id" goodSnapID = "444-444" altSnapID = "555-555" @@ -48,7 +52,7 @@ type feature struct { service *service 
adminClient *goscaleio.Client system *goscaleio.System - err error // return from the preceeding call + err error // return from the preceding call getPluginInfoResponse *csi.GetPluginInfoResponse getPluginCapabilitiesResponse *csi.GetPluginCapabilitiesResponse probeResponse *csi.ProbeResponse @@ -82,6 +86,7 @@ type feature struct { createSnapshotRequest *csi.CreateSnapshotRequest volumeIDList []string snapshotIndex int + volumeID string } func (f *feature) checkGoRoutines(tag string) { @@ -145,6 +150,8 @@ func (f *feature) aVxFlexOSService() error { gofsutil.GOFSMock.InduceFormatError = false gofsutil.GOFSMock.InduceGetDiskFormatError = false gofsutil.GOFSMock.InduceGetDiskFormatType = "" + gofsutil.GOFSMock.InduceFSTypeError = false + gofsutil.GOFSMock.InduceResizeFSError = false gofsutil.GOFSMockMounts = gofsutil.GOFSMockMounts[:0] // configure variables in the driver @@ -196,6 +203,9 @@ Module Size Used by vsock_diag 12610 0 scini 799210 0 ip6t_rpfilter 12595 1 +` + opts.drvCfgQueryMDM = ` +MDM-ID 14dbbf5617523654 SDC ID d0f33bd700000004 INSTALLATION ID 1c078b073d75512c IPs [0]-1.2.3.4 [1]-1.2.3.5 ` svc.opts = opts f.service = svc @@ -277,6 +287,12 @@ func (f *feature) theErrorContains(arg1 string) error { } // We expected an error... if f.err == nil { + possibleMatches := strings.Split(arg1, "@@") + for _, possibleMatch := range possibleMatches { + if possibleMatch == "none" { + return nil + } + } return fmt.Errorf("Expected error to contain %s but no error", arg1) } // Allow for multiple possible matches, separated by @@. 
This was necessary @@ -315,6 +331,11 @@ func (f *feature) thereIsANodeProbeSdcGUIDError() error { return nil } +func (f *feature) thereIsANodeProbeDrvCfgError() error { + f.service.opts.drvCfgQueryMDM = "" + return nil +} + func getTypicalCreateVolumeRequest() *csi.CreateVolumeRequest { req := new(csi.CreateVolumeRequest) params := make(map[string]string) @@ -322,7 +343,7 @@ func getTypicalCreateVolumeRequest() *csi.CreateVolumeRequest { req.Parameters = params req.Name = "volume1" capacityRange := new(csi.CapacityRange) - capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024 + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 req.CapacityRange = capacityRange block := new(csi.VolumeCapability_BlockVolume) capability := new(csi.VolumeCapability) @@ -345,7 +366,7 @@ func (f *feature) iSpecifyCreateVolumeMountRequest(fstype string) error { req.Parameters = params req.Name = "mount1" capacityRange := new(csi.CapacityRange) - capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024 + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 req.CapacityRange = capacityRange capability := new(csi.VolumeCapability) mountVolume := new(csi.VolumeCapability_MountVolume) @@ -400,16 +421,24 @@ func (f *feature) aValidCreateVolumeResponseIsReturned() error { return nil } -func (f *feature) iSpecifyAccessibilityRequirements() error { +func (f *feature) iSpecifyAccessibilityRequirementsWithASystemIDOf(requestedSystem string) error { + if requestedSystem == "f.service.opt.SystemName" { + requestedSystem = f.service.opts.SystemName + } req := new(csi.CreateVolumeRequest) params := make(map[string]string) params["storagepool"] = "viki_pool_HDD_20181031" req.Parameters = params req.Name = "accessability" capacityRange := new(csi.CapacityRange) - capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024 + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 req.CapacityRange = capacityRange req.AccessibilityRequirements = new(csi.TopologyRequirement) + top := new(csi.Topology) + top.Segments 
= map[string]string{ + "csi-vxflexos.dellemc.com/" + requestedSystem: "powerflex.dellemc.com", + } + req.AccessibilityRequirements.Preferred = append(req.AccessibilityRequirements.Preferred, top) f.createVolumeRequest = req return nil } @@ -430,7 +459,7 @@ func (f *feature) iSpecifyMULTINODEWRITER() error { req.Parameters = params req.Name = "multinode_writer" capacityRange := new(csi.CapacityRange) - capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024 + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 req.CapacityRange = capacityRange block := new(csi.VolumeCapability_BlockVolume) capability := new(csi.VolumeCapability) @@ -450,7 +479,7 @@ func (f *feature) iSpecifyMULTINODEWRITER() error { func (f *feature) iSpecifyABadCapacity() error { req := getTypicalCreateVolumeRequest() capacityRange := new(csi.CapacityRange) - capacityRange.RequiredBytes = -8 * 1024 * 1024 * 1024 + capacityRange.RequiredBytes = -32 * 1024 * 1024 * 1024 req.CapacityRange = capacityRange req.Name = "bad capacity" f.createVolumeRequest = req @@ -481,6 +510,7 @@ func (f *feature) iCallCreateVolumeSize(name string, size int64) error { if f.createVolumeResponse != nil { log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId) } + return nil } @@ -534,6 +564,10 @@ func (f *feature) iInduceError(errtype string) error { stepHandlersErrors.RemoveVolumeError = true case "VolumeInstancesError": stepHandlersErrors.VolumeInstancesError = true + case "NoVolumeIDError": + stepHandlersErrors.NoVolumeIDError = true + case "SetVolumeSizeError": + stepHandlersErrors.SetVolumeSizeError = true case "NoSymlinkForNodePublish": cmd := exec.Command("rm", "-rf", nodePublishSymlinkDir) _, err := cmd.CombinedOutput() @@ -567,12 +601,47 @@ func (f *feature) iInduceError(errtype string) error { f.nodePublishVolumeRequest.VolumeCapability.AccessMode = nil case "NodePublishNoAccessType": f.nodePublishVolumeRequest.VolumeCapability.AccessType = nil + case "NodePublishPrivateTargetAlreadyCreated": + err 
:= os.MkdirAll("features/"+sdcVolume1, 0777) + if err != nil { + fmt.Printf("Couldn't make: %s\n", datadir+"/"+sdcVolume1) + } + case "NodePublishPrivateTargetAlreadyMounted": + cmd := exec.Command("mknod", nodePublishAltBlockDevPath, "b", "0", "0") + _, err := cmd.CombinedOutput() + if err != nil { + fmt.Printf("Couldn't create block dev: %s\n", nodePublishAltBlockDevPath) + } + err = os.MkdirAll("features/"+sdcVolume1, 0777) + if err != nil { + fmt.Printf("Couldn't make: %s\n", datadir+"/"+sdcVolume1) + } + err = gofsutil.Mount(context.Background(), nodePublishAltBlockDevPath, "features\\"+sdcVolume1, "none") + if err != nil { + fmt.Printf("Couldn't mount: %s\n", "features\\"+sdcVolume1) + } case "NodePublishNoTargetPath": f.nodePublishVolumeRequest.TargetPath = "" + case "NodePublishBadTargetPath": + f.nodePublishVolumeRequest.TargetPath = badtarget case "NodePublishBlockTargetNotFile": f.nodePublishVolumeRequest.TargetPath = datadir case "NodePublishFileTargetNotDir": f.nodePublishVolumeRequest.TargetPath = datafile + case "NodePublishPathAltDataDir": + if f.nodePublishVolumeRequest.TargetPath == datadir { + err := os.MkdirAll(altdatadir, 0777) + if err != nil { + fmt.Printf("Couldn't make altdatadir: %s\n", altdatadir) + } + f.nodePublishVolumeRequest.TargetPath = altdatadir + } else { + _, err := os.Create(altdatafile) + if err != nil { + fmt.Printf("Couldn't make datafile: %s\n", altdatafile) + } + f.nodePublishVolumeRequest.TargetPath = altdatafile + } case "GOFSMockBindMountError": gofsutil.GOFSMock.InduceBindMountError = true case "GOFSMockDevMountsError": @@ -589,10 +658,16 @@ func (f *feature) iInduceError(errtype string) error { gofsutil.GOFSMock.InduceGetDiskFormatType = "unknown-fs" case "GOFSMockFormatError": gofsutil.GOFSMock.InduceFormatError = true + case "GOFSInduceFSTypeError": + gofsutil.GOFSMock.InduceFSTypeError = true + case "GOFSInduceResizeFSError": + gofsutil.GOFSMock.InduceResizeFSError = true case "NodeUnpublishNoTargetPath": 
f.nodePublishVolumeRequest.TargetPath = "" case "NodeUnpublishBadVolume": f.nodePublishVolumeRequest.VolumeId = sdcVolume0 + case "none": + return nil default: return fmt.Errorf("Don't know how to induce error %q", errtype) } @@ -840,6 +915,7 @@ func (f *feature) aValidNodeGetInfoResponseIsReturned() error { if f.err != nil { return f.err } + fmt.Printf("node: %s", f.nodeGetInfoResponse) if f.nodeGetInfoResponse.NodeId == "" { return errors.New("expected NodeGetInfoResponse to contain NodeID but it was null") } @@ -1022,11 +1098,13 @@ func (f *feature) aValidControllerGetCapabilitiesResponseIsReturned() error { count = count + 1 case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: count = count + 1 + case csi.ControllerServiceCapability_RPC_EXPAND_VOLUME: + count = count + 1 default: return fmt.Errorf("received unexpected capability: %v", typex) } } - if count != 6 { + if count != 7 { return errors.New("Did not retrieve all the expected capabilities") } return nil @@ -1192,9 +1270,9 @@ func (f *feature) aControllerPublishedVolume() error { cmd := exec.Command("rm", "-rf", "features/"+sdcVolume1) _, err = cmd.CombinedOutput() if err != nil { - fmt.Printf("error removing private staging directory") + fmt.Printf("error removing private staging directory\n") } else { - fmt.Printf("removed private staging directory") + fmt.Printf("removed private staging directory\n") } // Make the block device @@ -1203,7 +1281,7 @@ func (f *feature) aControllerPublishedVolume() error { cmd := exec.Command("mknod", nodePublishBlockDevicePath, "b", "0", "0") output, err := cmd.CombinedOutput() if err != nil { - fmt.Printf("scinia: " + err.Error()) + fmt.Printf("scinia: %s\n", err.Error()) } fmt.Printf("mknod output: %s\n", output) @@ -1213,7 +1291,8 @@ func (f *feature) aControllerPublishedVolume() error { output, err = cmd.CombinedOutput() fmt.Printf("symlink output: %s\n", output) if err != nil { - fmt.Printf("link: " + err.Error()) + fmt.Printf("link: %s\n", err.Error()) + err = 
nil } } @@ -1333,6 +1412,8 @@ func (f *feature) iCallBeforeServe() error { stringSlice = append(stringSlice, EnvPassword+"=password") stringSlice = append(stringSlice, EnvSystemName+"=unknown") stringSlice = append(stringSlice, "X_CSI_PRIVATE_MOUNT_DIR=/csi") + stringSlice = append(stringSlice, "X_CSI_VXFLEXOS_ENABLESNAPSHOTCGDELETE=true") + stringSlice = append(stringSlice, "X_CSI_VXFLEXOS_ENABLELISTVOLUMESNAPSHOTS=true") ctx := context.WithValue(context.Background(), ctxOSEnviron, stringSlice) listener, err := net.Listen("tcp", "127.0.0.1:65000") if err != nil { @@ -1350,17 +1431,33 @@ func (f *feature) iCallNodeStageVolume() error { return nil } -func (f *feature) iCallControllerExpandVolume() error { - ctx := new(context.Context) - req := new(csi.ControllerExpandVolumeRequest) - _, f.err = f.service.ControllerExpandVolume(*ctx, req) +func (f *feature) iCallControllerExpandVolume(size int64) error { + header := metadata.New(map[string]string{"csi.requestid": "1"}) + ctx := metadata.NewIncomingContext(context.Background(), header) + f.volumeID = f.createVolumeResponse.GetVolume().VolumeId + req := &csi.ControllerExpandVolumeRequest{ + VolumeId: f.volumeID, + CapacityRange: &csi.CapacityRange{RequiredBytes: size * bytesInKiB * bytesInKiB * bytesInKiB}, + } + if stepHandlersErrors.NoVolumeIDError { + req.VolumeId = "" + } + _, f.err = f.service.ControllerExpandVolume(ctx, req) return nil } -func (f *feature) iCallNodeExpandVolume() error { - ctx := new(context.Context) - req := new(csi.NodeExpandVolumeRequest) - _, f.err = f.service.NodeExpandVolume(*ctx, req) +func (f *feature) iCallNodeExpandVolume(volPath string) error { + header := metadata.New(map[string]string{"csi.requestid": "1"}) + ctx := metadata.NewIncomingContext(context.Background(), header) + f.volumeID = f.createVolumeResponse.Volume.VolumeId + req := &csi.NodeExpandVolumeRequest{ + VolumeId: sdcVolume1, + VolumePath: volPath, + } + if stepHandlersErrors.NoVolumeIDError { + req.VolumeId = "" + } + _, 
f.err = f.service.NodeExpandVolume(ctx, req) return nil } @@ -1389,10 +1486,27 @@ func (f *feature) aValidNodeGetCapabilitiesResponseIsReturned() error { if f.err != nil { return f.err } - if len(f.nodeGetCapabilitiesResponse.Capabilities) > 0 { - return errors.New("expected NodeGetCapabilities to return no capabilities") + rep := f.nodeGetCapabilitiesResponse + if rep != nil { + if rep.Capabilities == nil { + return errors.New("no capabilities returned in NodeGetCapabilitiesResponse") + } + count := 0 + for _, cap := range rep.Capabilities { + typex := cap.GetRpc().Type + switch typex { + case csi.NodeServiceCapability_RPC_EXPAND_VOLUME: + count = count + 1 + default: + return fmt.Errorf("received unxexpcted capability: %v", typex) + } + } + if count != 1 { + return errors.New("Did not retrieve all the expected capabilities") + } + return nil } - return nil + return errors.New("expected NodeGetCapabilitiesResponse but didn't get one") } func (f *feature) iCallCreateSnapshot(snapName string) error { @@ -1472,7 +1586,7 @@ func (f *feature) aValidSnapshotConsistencyGroup() error { volumeIDToAncestorID[goodSnapID] = goodVolumeID volumeIDToConsistencyGroupID[goodSnapID] = goodVolumeID - // second snapshot in CG; this looks wierd, but we give same ID to snap + // second snapshot in CG; this looks weird, but we give same ID to snap // as it's ancestor so that we can publish the volume volumeIDToName[altSnapID] = "snap5" volumeNameToID["snap5"] = altSnapID @@ -1661,8 +1775,17 @@ func (f *feature) iInvalidateTheProbeCache() error { return nil } -func (f *feature) iCallGetDevice(invalidPath string) error { - device, err := GetDevice(invalidPath) +func (f *feature) iCallEvalsymlink(path string) error { + + d := evalSymlinks(path) + if d == path { + f.err = errors.New("Could not evaluate symlinks for path") + } + return nil +} + +func (f *feature) iCallGetDevice(Path string) error { + device, err := GetDevice(Path) if device == nil && err != nil { f.err = errors.New("invalid 
path error") } @@ -1706,6 +1829,31 @@ func (f *feature) iCallGetStoragePoolnameByID(id string) error { return nil } +func (f *feature) iCallNodeGetAllSystems() error { + // lookup the system names for a couple of systems + // This should not generate an error as systems without names are supported + systems := make([]string, 0) + systems = append(systems, "14dbbf5617523654") + systems = append(systems, "9999999999999999") + f.err = f.service.getAllSystems(context.TODO(), systems) + return nil +} + +func (f *feature) iDoNotHaveAGatewayConnection() error { + f.service.adminClient = nil + return nil +} + +func (f *feature) iDoNotHaveAValidGatewayEndpoint() error { + f.service.opts.Endpoint = "" + return nil +} + +func (f *feature) iDoNotHaveAValidGatewayPassword() error { + f.service.opts.Password = "" + return nil +} + func FeatureContext(s *godog.Suite) { f := &feature{} s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) @@ -1725,9 +1873,10 @@ func FeatureContext(s *godog.Suite) { s.Step(`^the Controller has no connection$`, f.theControllerHasNoConnection) s.Step(`^there is a Node Probe Lsmod error$`, f.thereIsANodeProbeLsmodError) s.Step(`^there is a Node Probe SdcGUID error$`, f.thereIsANodeProbeSdcGUIDError) + s.Step(`^there is a Node Probe drvCfg error$`, f.thereIsANodeProbeDrvCfgError) s.Step(`^I call CreateVolume "([^"]*)"$`, f.iCallCreateVolume) s.Step(`^a valid CreateVolumeResponse is returned$`, f.aValidCreateVolumeResponseIsReturned) - s.Step(`^I specify AccessibilityRequirements$`, f.iSpecifyAccessibilityRequirements) + s.Step(`^I specify AccessibilityRequirements with a SystemID of "([^"]*)"$`, f.iSpecifyAccessibilityRequirementsWithASystemIDOf) s.Step(`^I specify MULTINODE_WRITER$`, f.iSpecifyMULTINODEWRITER) s.Step(`^I specify a BadCapacity$`, f.iSpecifyABadCapacity) s.Step(`^I specify NoStoragePool$`, f.iSpecifyNoStoragePool) @@ -1794,8 +1943,8 @@ func FeatureContext(s *godog.Suite) { s.Step(`^I call ListSnapshots for snapshot "([^"]*)"$`, 
f.iCallListSnapshotsForSnapshot) s.Step(`^the snapshot ID is "([^"]*)"$`, f.theSnapshotIDIs) s.Step(`^I invalidate the Probe cache$`, f.iInvalidateTheProbeCache) - s.Step(`^I call ControllerExpandVolume$`, f.iCallControllerExpandVolume) - s.Step(`^I call NodeExpandVolume$`, f.iCallNodeExpandVolume) + s.Step(`^I call ControllerExpandVolume set to (\d+)$`, f.iCallControllerExpandVolume) + s.Step(`^I call NodeExpandVolume with volumePath as "([^"]*)"$`, f.iCallNodeExpandVolume) s.Step(`^I call NodeGetVolumeStats$`, f.iCallNodeGetVolumeStats) s.Step(`^I give request volume context$`, f.iGiveRequestVolumeContext) s.Step(`^I call GetDevice "([^"]*)"$`, f.iCallGetDevice) @@ -1803,4 +1952,9 @@ func FeatureContext(s *godog.Suite) { s.Step(`^a new service is returned$`, f.aNewServiceIsReturned) s.Step(`^I call getVolProvisionType with bad params$`, f.iCallGetVolProvisionTypeWithBadParams) s.Step(`^i Call getStoragePoolnameByID "([^"]*)"$`, f.iCallGetStoragePoolnameByID) + s.Step(`^I call evalsymlink "([^"]*)"$`, f.iCallEvalsymlink) + s.Step(`^I Call nodeGetAllSystems$`, f.iCallNodeGetAllSystems) + s.Step(`^I do not have a gateway connection$`, f.iDoNotHaveAGatewayConnection) + s.Step(`^I do not have a valid gateway endpoint$`, f.iDoNotHaveAValidGatewayEndpoint) + s.Step(`^I do not have a valid gateway password$`, f.iDoNotHaveAValidGatewayPassword) } diff --git a/service/step_handlers_test.go b/service/step_handlers_test.go index c12eddbc..2b126cd1 100644 --- a/service/step_handlers_test.go +++ b/service/step_handlers_test.go @@ -40,6 +40,8 @@ var ( NoSysNameError bool NoAdminError bool WrongSysNameError bool + NoVolumeIDError bool + SetVolumeSizeError bool } ) @@ -82,6 +84,8 @@ func getHandler() http.Handler { stepHandlersErrors.NoSysNameError = false stepHandlersErrors.NoAdminError = false stepHandlersErrors.WrongSysNameError = false + stepHandlersErrors.NoVolumeIDError = false + stepHandlersErrors.SetVolumeSizeError = false sdcMappings = sdcMappings[:0] sdcMappingsID = "" 
@@ -104,6 +108,13 @@ func getRouter() http.Handler { // handleLogin implements GET /api/login func handleLogin(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || len(strings.TrimSpace(u)) < 1 || len(strings.TrimSpace(p)) < 1 { + w.Header().Set("WWW-Authenticate", "Basic realm=Restricted") + w.WriteHeader(http.StatusUnauthorized) + returnJSONFile("features", "authorization_failure.json", w, nil) + return + } if testControllerHasNoConnection { w.WriteHeader(http.StatusRequestTimeout) return @@ -345,6 +356,11 @@ func handleAction(w http.ResponseWriter, r *http.Request) { if name != "" { volumeNameToID[name] = "" } + case "setVolumeSize": + if stepHandlersErrors.SetVolumeSizeError { + writeError(w, "induced error", http.StatusRequestTimeout, codes.Internal) + return + } } } @@ -408,6 +424,10 @@ func handleInstances(w http.ResponseWriter, r *http.Request) { writeError(w, "induced error", http.StatusRequestTimeout, codes.Internal) return } + if stepHandlersErrors.NoVolumeIDError { + writeError(w, "Volume ID is required", http.StatusRequestTimeout, codes.InvalidArgument) + return + } if stepHandlersErrors.SIOGatewayVolumeNotFoundError { writeError(w, "Could not find the volume", http.StatusRequestTimeout, codes.Internal) diff --git a/test/helm/betasnap1.yaml b/test/helm/betasnap1.yaml new file mode 100644 index 00000000..55078042 --- /dev/null +++ b/test/helm/betasnap1.yaml @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: pvol0-snap1 + namespace: helmtest-vxflexos +spec: + volumeSnapshotClassName: vxflexos-snapclass + source: + persistentVolumeClaimName: pvol0 diff --git a/test/helm/betasnap2.yaml b/test/helm/betasnap2.yaml new file mode 100644 index 00000000..31484e11 --- /dev/null +++ b/test/helm/betasnap2.yaml @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: pvol0-snap2 + namespace: helmtest-vxflexos +spec: + volumeSnapshotClassName: 
vxflexos-snapclass + source: + persistentVolumeClaimName: pvol0 diff --git a/test/helm/unsupported/block1/Chart.yaml b/test/helm/block/block-rwx/Chart.yaml similarity index 88% rename from test/helm/unsupported/block1/Chart.yaml rename to test/helm/block/block-rwx/Chart.yaml index 19136b54..1daee12d 100644 --- a/test/helm/unsupported/block1/Chart.yaml +++ b/test/helm/block/block-rwx/Chart.yaml @@ -1,4 +1,4 @@ -name: vxflex-csi +name: block-rwx version: 0.0.1 appVersion: 2.6.0 description: | diff --git a/test/helm/block/block-rwx/templates/test.yaml b/test/helm/block/block-rwx/templates/test.yaml new file mode 100644 index 00000000..26f36a40 --- /dev/null +++ b/test/helm/block/block-rwx/templates/test.yaml @@ -0,0 +1,47 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vxflextest + namespace: {{ .Values.namespace }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol0 + namespace: {{ .Values.namespace }} +spec: + accessModes: + - ReadWriteMany + volumeMode: Block + resources: + requests: + storage: 8Gi + storageClassName: {{ .Values.storageclass }} +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: vxflextest + namespace: {{ .Values.namespace }} +spec: + replicas: 2 + selector: + matchLabels: + app: vxflextest + template: + metadata: + labels: + app: vxflextest + spec: + serviceAccount: vxflextest + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeDevices: + - devicePath: "/dev/data0" + name: pvol0 + volumes: + - name: pvol0 + persistentVolumeClaim: + claimName: pvol0 diff --git a/test/helm/block/block-rwx/values.yaml b/test/helm/block/block-rwx/values.yaml new file mode 100644 index 00000000..8b77916c --- /dev/null +++ b/test/helm/block/block-rwx/values.yaml @@ -0,0 +1 @@ +storageclass: vxflexos diff --git a/test/helm/block/block1/Chart.yaml b/test/helm/block/block1/Chart.yaml new file mode 100644 index 00000000..a262edfb --- /dev/null +++ 
b/test/helm/block/block1/Chart.yaml @@ -0,0 +1,10 @@ +name: block1 +version: 0.0.1 +appVersion: 2.6.0 +description: | + Tests VxFlexOS CSI deployments. +keywords: +- vxflexos-csi +- storage +- block +engine: gotpl diff --git a/test/helm/block/block1/templates/test.yaml b/test/helm/block/block1/templates/test.yaml new file mode 100644 index 00000000..ae766cfc --- /dev/null +++ b/test/helm/block/block1/templates/test.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.name }} + namespace: {{ .Values.namespace }} +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.name }} + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: {{ .Values.name }} + serviceName: test2vols + template: + metadata: + labels: + app: {{ .Values.name }} + spec: + serviceAccount: {{ .Values.name }} + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeDevices: + - devicePath: "/dev/data0" + name: pvol0 + volumeClaimTemplates: + - metadata: + name: pvol0 + spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + storageClassName: vxflexos + resources: + requests: + storage: 8Gi + diff --git a/test/helm/block/block1/values.yaml b/test/helm/block/block1/values.yaml new file mode 100644 index 00000000..ae65d217 --- /dev/null +++ b/test/helm/block/block1/values.yaml @@ -0,0 +1,2 @@ +storageclass: vxflexos +name: vxflextest diff --git a/test/helm/unsupported/block2/Chart.yaml b/test/helm/block/block2/Chart.yaml similarity index 88% rename from test/helm/unsupported/block2/Chart.yaml rename to test/helm/block/block2/Chart.yaml index 19136b54..037af4dd 100644 --- a/test/helm/unsupported/block2/Chart.yaml +++ b/test/helm/block/block2/Chart.yaml @@ -1,4 +1,4 @@ -name: vxflex-csi +name: block2 version: 0.0.1 appVersion: 2.6.0 description: | diff --git a/test/helm/block/block2/templates/test.yaml b/test/helm/block/block2/templates/test.yaml new file mode 100644 index 
00000000..64386df2 --- /dev/null +++ b/test/helm/block/block2/templates/test.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.name }} + namespace: {{ .Values.namespace }} +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: {{ .Values.name }} + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: {{ .Values.name }} + serviceName: test2vols + template: + metadata: + labels: + app: {{ .Values.name }} + spec: + serviceAccount: {{ .Values.name }} + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeDevices: + - devicePath: "/dev/data0" + name: pvol0 + - devicePath: "/dev/data1" + name: pvol1 + volumeClaimTemplates: + - metadata: + name: pvol0 + spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + storageClassName: {{ .Values.storageclass }} + resources: + requests: + storage: 8Gi + - metadata: + name: pvol1 + spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + storageClassName: {{ .Values.storageclass }} + resources: + requests: + storage: 12Gi + diff --git a/test/helm/block/block2/values.yaml b/test/helm/block/block2/values.yaml new file mode 100644 index 00000000..38dc52c9 --- /dev/null +++ b/test/helm/block/block2/values.yaml @@ -0,0 +1,2 @@ +name: vxflextest +storageclass: vxflexos diff --git a/test/helm/blocksnap/1snap/Chart.yaml b/test/helm/blocksnap/1snap/Chart.yaml new file mode 100644 index 00000000..20fcf7f7 --- /dev/null +++ b/test/helm/blocksnap/1snap/Chart.yaml @@ -0,0 +1,11 @@ +name: 1snap +version: 1.0.0 +apiVersion: v1 +appVersion: 1.0.0 +description: | + Tests VxFlexOS CSI deployments. 
+icon: https://avatars1.githubusercontent.com/u/20958494?s=200&v=4 +keywords: +- vxflexos-csi +- storage +engine: gotpl diff --git a/test/helm/blocksnap/1snap/templates/snap1.yaml b/test/helm/blocksnap/1snap/templates/snap1.yaml new file mode 100644 index 00000000..f2c03f28 --- /dev/null +++ b/test/helm/blocksnap/1snap/templates/snap1.yaml @@ -0,0 +1,9 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: pvol0-snap1 + namespace: {{ .Values.namespace }} +spec: + source: + persistentVolumeClaimName: pvol0 + volumeSnapshotClassName: {{ .Values.snapclass }} diff --git a/test/helm/blocksnap/1vol/Chart.yaml b/test/helm/blocksnap/1vol/Chart.yaml new file mode 100644 index 00000000..6b2bdd51 --- /dev/null +++ b/test/helm/blocksnap/1vol/Chart.yaml @@ -0,0 +1,11 @@ +name: 1vol +version: 1.0.0 +apiVersion: v1 +appVersion: 1.0.0 +description: | + Tests VxFlexOS CSI deployments. +icon: https://avatars1.githubusercontent.com/u/20958494?s=200&v=4 +keywords: +- vxflexos-csi +- storage +engine: gotpl diff --git a/test/helm/blocksnap/1vol/templates/test.yaml b/test/helm/blocksnap/1vol/templates/test.yaml new file mode 100644 index 00000000..fab8cc97 --- /dev/null +++ b/test/helm/blocksnap/1vol/templates/test.yaml @@ -0,0 +1,47 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol0 + namespace: {{ .Values.namespace }} +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: {{ .Values.storageclass }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vol + namespace: {{ .Values.namespace }} +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: vol + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: vol + serviceName: 1vol + template: + metadata: + labels: + app: vol + spec: + serviceAccount: vol + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeMounts: + - 
mountPath: "/data0" + name: pvol0 + volumes: + - name: pvol0 + persistentVolumeClaim: + claimName: pvol0 diff --git a/test/helm/blocksnap/1volfromsnap/Chart.yaml b/test/helm/blocksnap/1volfromsnap/Chart.yaml new file mode 100644 index 00000000..e8de7ccc --- /dev/null +++ b/test/helm/blocksnap/1volfromsnap/Chart.yaml @@ -0,0 +1,11 @@ +name: 1volfromsnap +version: 1.0.0 +apiVersion: v1 +appVersion: 1.0.0 +description: | + Tests VxFlexOS CSI deployments. +icon: https://avatars1.githubusercontent.com/u/20958494?s=200&v=4 +keywords: +- vxflexos-csi +- storage +engine: gotpl diff --git a/test/helm/blocksnap/1volfromsnap/templates/test.yaml b/test/helm/blocksnap/1volfromsnap/templates/test.yaml new file mode 100644 index 00000000..b63c78cc --- /dev/null +++ b/test/helm/blocksnap/1volfromsnap/templates/test.yaml @@ -0,0 +1,49 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol0-copy1 + namespace: {{ .Values.namespace }} +spec: + accessModes: + - ReadWriteOnce + dataSource: + apiGroup: snapshot.storage.k8s.io + kind: VolumeSnapshot + name: pvol0-snap1 + resources: + requests: + storage: 8Gi + storageClassName: {{ .Values.storageclass }} + volumeMode: Block +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: copy + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: copy + serviceName: copy + template: + metadata: + labels: + app: copy + spec: + serviceAccount: vol + containers: + - name: copy + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + securityContext: + capabilities: + add: + - SYS_ADMIN + volumeDevices: + - devicePath: "/data0" + name: pvol0-copy1 + volumes: + - name: pvol0-copy1 + persistentVolumeClaim: + claimName: pvol0-copy1 diff --git a/test/helm/blocksnap/README.md b/test/helm/blocksnap/README.md new file mode 100644 index 00000000..12755da2 --- /dev/null +++ b/test/helm/blocksnap/README.md @@ -0,0 +1,26 @@ +Blocksnap Test +============== + +This is a real use case that arises in data 
protection. The idea is to make a mounted file system from a volume for a first pod, +have that pod write some data to the file system, and then take a snap of the volume, and then to make a second pod which +uses the snap as a volume source for a block mounted volume. + +The block volume created from the snapshot can then be used to transmit the volume efficiently by the 2nd pod to remote storage. + +The test mounts the block volume in the 2nd pod in a local directory, and compares the data written by the 1st pod (which is a tar .tgz file) +with the data from mounting the snap contents in the 2nd pod, which should always be identical. + +Running the Test +---------------- + +Execute "sh run.sh" to run the test in a kubernetes environment that supports beta block snapshots. The test is preconfigured to use +the vxflexos CSI storage system in run.sh. You can edit run.sh and change the storageclass, snapclass (the volumesnapshotclass name), +and namespace parameters to run the test for a different type of CSI storage. + +The test is constructed from three helm charts that are deployed in the following sequence: +1. 1vol creates one volume and deploys it in a pod +2. 1snap creates a snapshot from the volume in step 1. +3. 1volfromsnap creates a volume from the snapshot and deploys it in a second pod. + +If the test runs successfully, it compares the data generated in the first pod with the data from the snap in the second pod +to make sure they match. Then it deletes the helm deployments and waits until the pvcs in the namespace are deleted. 
diff --git a/test/helm/blocksnap/run.sh b/test/helm/blocksnap/run.sh new file mode 100644 index 00000000..2d6fe0f9 --- /dev/null +++ b/test/helm/blocksnap/run.sh @@ -0,0 +1,89 @@ +#!/bin/sh +namespace=test +helmsettings="storageclass=vxflexos,snapclass=vxflexos-snapclass" +alias k=kubectl + +# arg1=pod name +waitOnRunning() { + echo "waiting on $1 to reach running" + running=0 + while [ $running -ne 1 ] ; + do + k get pods -n $namespace | grep $1 + running=$(k get pods -n $namespace | grep $1 | grep Running | wc -l) + sleep 5 + done +} + +# arg1=volumesnapshot name +waitOnSnapshotReady() { + echo "waiting on $1 to reach ready" + ready="false" + while [ "$ready" != "true" ] ; + do + name=$(k get volumesnapshot -n $namespace | grep $1 | awk ' { print $1; }') + ready=$(k get volumesnapshot -n $namespace | grep $1 | awk ' { print $2; }') + echo $name ready: $ready + sleep 5 + done +} + +# waitOnNoPvc() +waitOnNoPvc() { + echo "waiting on all pvcs to be deleted from namespace" + pvcs=$(k get pvc -n $namespace | grep -v NAME | wc -l) + while [ $pvcs -gt 0 ] ; + do + pvcs=$(k get pvc -n $namespace | grep -v NAME | wc -l) + k get pvc -n $namespace + sleep 5 + done +} + +helm install --set $helmsettings -n $namespace 1vol 1vol +waitOnRunning vol-0 + +# Write some data into the file system. 
+echo "k exec -it -n test vxflextest-0 -- tar czvf /data0/data.tgz /usr" +k exec -it -n test vol-0 -- tar czvf /data0/data.tgz /usr +# Sync the data onto the file system +k exec -it -n test vol-0 -- sync +k exec -it -n test vol-0 -- ls -l /data0/data.tgz +sumA=$(k exec -it -n test vol-0 -- md5sum /data0/data.tgz | awk ' {print $1}') +echo sumA $sumA +k exec -it -n test vol-0 -- sync + +helm install --set $helmsettings -n $namespace 1snap 1snap +waitOnSnapshotReady vol0-snap1 + +helm install --set $helmsettings -n test 1volfromsnap 1volfromsnap +waitOnRunning copy-0 +k get pods -n test + +echo "Checking the data" +echo "k exec -it -n test copy-0 -- mkdir /tmp/foo" +k exec -it -n test copy-0 -- mkdir /tmp/foo +echo "k exec -it -n test copy-0 -- mount /data0 /tmp/foo" +k exec -it -n test copy-0 -- mount /data0 /tmp/foo +echo "k exec -it -n test copy-0 -- ls -l /tmp/foo/data.tgz" +k exec -it -n test copy-0 -- ls -l /tmp/foo/data.tgz +echo "k exec -it -n test copy-0 -- tar tzvf /tmp/foo/data.tgz | tail -20" +k exec -it -n test copy-0 -- tar tzvf /tmp/foo/data.tgz | tail -20 +sumB=$(k exec -it -n test copy-0 -- md5sum /tmp/foo/data.tgz | awk ' {print $1}') + +echo sumA $sumA sumB $sumB +if [ "$sumA" != "$sumB" ] ; then + echo "Different checksums- test failed" + exit 2 +fi + +sleep 30 +helm delete -n $namespace 1volfromsnap + +sleep 30 +helm delete -n $namespace 1snap + +sleep 30 +helm delete -n $namespace 1vol + +waitOnNoPvc diff --git a/test/helm/common.bash b/test/helm/common.bash old mode 100644 new mode 100755 index 711f1ade..7bb5eaf8 --- a/test/helm/common.bash +++ b/test/helm/common.bash @@ -14,3 +14,5 @@ waitOnRunning() { done } +kMajorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Major:"//' -e 's/[^0-9].*//g') +kMinorVersion=$(kubectl version | grep 'Server Version' | sed -e 's/^.*Minor:"//' -e 's/[^0-9].*//g') diff --git a/test/helm/deletepvcs.sh b/test/helm/deletepvcs.sh old mode 100644 new mode 100755 index dfb017a0..c4f7631d --- 
a/test/helm/deletepvcs.sh +++ b/test/helm/deletepvcs.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash force=no if [ "$1" == "--force" ]; then diff --git a/test/helm/get.volume.ids b/test/helm/get.volume.ids index c91af44b..aedcab40 100644 --- a/test/helm/get.volume.ids +++ b/test/helm/get.volume.ids @@ -1,4 +1,4 @@ -#/bin/bash +#!/bin/bash ids=$(kubectl describe persistentvolume -n helmtest-vxflexos | grep VolumeHandle | awk ' { print $2; }') echo ids $ids ids=$(echo $ids | tr ' ' ',' ) diff --git a/test/helm/postgres.sh b/test/helm/postgres.sh old mode 100644 new mode 100755 index c8777457..c3b4fd17 --- a/test/helm/postgres.sh +++ b/test/helm/postgres.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash DBSQL=/root/dbsamples-0.1/world/world.sql helm install -n postgres postgres echo "Waiting for pods to come up..." diff --git a/test/helm/snap1.yaml b/test/helm/snap1.yaml deleted file mode 100644 index f6e0f5cf..00000000 --- a/test/helm/snap1.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: snapshot.storage.k8s.io/v1alpha1 -kind: VolumeSnapshot -metadata: - name: pvol0-snap1 - namespace: helmtest-vxflexos -spec: - snapshotClassName: vxflexos-snapclass - source: - name: pvol0 - kind: PersistentVolumeClaim diff --git a/test/helm/snap2.yaml b/test/helm/snap2.yaml deleted file mode 100644 index b21936fa..00000000 --- a/test/helm/snap2.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: snapshot.storage.k8s.io/v1alpha1 -kind: VolumeSnapshot -metadata: - name: pvol0-snap2 - namespace: helmtest-vxflexos -spec: - snapshotClassName: vxflexos-snapclass - source: - name: pvol0 - kind: PersistentVolumeClaim diff --git a/test/helm/snapcg.yaml b/test/helm/snapcg.yaml deleted file mode 100644 index 0dc9d993..00000000 --- a/test/helm/snapcg.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: snapshot.storage.k8s.io/v1alpha1 -kind: VolumeSnapshot -metadata: - name: pvol0-snap1 - namespace: helmtest-vxflexos -spec: - snapshotClassName: vxflexos-cgsnap - source: - name: pvol0 - kind: PersistentVolumeClaim 
diff --git a/test/helm/snapcgtest.sh b/test/helm/snapcgtest.sh deleted file mode 100644 index 63ecde5f..00000000 --- a/test/helm/snapcgtest.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -rm -f .volsnapclass.yaml -kubectl delete volumesnapshotclass vxflexos-cgsnap -n helmtest-vxflexos - -# Collect the list of "Bound" volumes and translate them to VxFlex OS ids -vols=$( kubectl get pv | grep Bound | awk ' { print $1; }' ) -ids="" -for avol in $vols; -do - echo $avol - ids="${ids} $(kubectl describe persistentvolume $avol | grep VolumeHandle | awk ' { print $2; }')" -done -ids=$(echo $ids | tr ' ' ',' ) -echo ids $ids - -# Edit the snapshotclass to have a valid VOLUME_ID_LIST -sed .volsnapclass.yaml -cat .volsnapclass.yaml - -echo "creating volume snapshot class..." -kubectl create -f .volsnapclass.yaml -sleep 3 -echo "creating volume snapshot" -kubectl create -f snapcg.yaml -sleep 20 -echo "snapshot created:" -kubectl get volumesnapshot -n helmtest-vxflexos -sleep 60 -echo "deleting volume snapshot" -kubectl delete -f snapcg.yaml -sleep 20 -echo "snapshot deleted:" -kubectl get volumesnapshot -n helmtest-vxflexos -kubectl delete volumesnapshotclass vxflexos-cgsnap -n helmtest-vxflexos diff --git a/test/helm/snaprestoretest.sh b/test/helm/snaprestoretest.sh old mode 100644 new mode 100755 index ecbfd4fa..244f67f1 --- a/test/helm/snaprestoretest.sh +++ b/test/helm/snaprestoretest.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash NS=helmtest-vxflexos source ./common.bash @@ -11,11 +11,11 @@ kubectl exec -n ${NS} vxflextest-0 -- ls -l /data0 kubectl exec -n ${NS} vxflextest-0 -- sync kubectl exec -n ${NS} vxflextest-0 -- sync echo "creating snap1 of pvol0" -kubectl create -f snap1.yaml +kubectl create -f betasnap1.yaml sleep 10 kubectl get volumesnapshot -n ${NS} echo "updating container to add a volume sourced from snapshot" -helm upgrade -n helmtest-vxflexos 2vols 2vols+restore +helm upgrade -n helmtest-vxflexos 2vols 2vols+restore echo "waiting for container to 
upgrade/stabalize" sleep 20 waitOnRunning diff --git a/test/helm/snaptest.sh b/test/helm/snaptest.sh old mode 100644 new mode 100755 index 805261c3..6a3c8936 --- a/test/helm/snaptest.sh +++ b/test/helm/snaptest.sh @@ -1,19 +1,22 @@ -#!/bin/sh +#!/bin/bash +NS=helmtest-vxflexos +source ./common.bash + echo "creating snap1 of pvol0" -kubectl create -f snap1.yaml +kubectl create -f betasnap1.yaml sleep 10 -kubectl get volumesnapshot -n helmtest-vxflexos -kubectl describe volumesnapshot -n helmtest-vxflexos +kubectl get volumesnapshot -n ${NS} +kubectl describe volumesnapshot -n ${NS} sleep 10 echo "creating snap2 of pvol0" -kubectl create -f snap2.yaml +kubectl create -f betasnap2.yaml sleep 10 -kubectl describe volumesnapshot -n helmtest-vxflexos +kubectl describe volumesnapshot -n ${NS} sleep 10 echo "deleting snapshots..." -kubectl delete volumesnapshot pvol0-snap1 -n helmtest-vxflexos +kubectl delete volumesnapshot pvol0-snap1 -n ${NS} sleep 10 -kubectl delete volumesnapshot pvol0-snap2 -n helmtest-vxflexos +kubectl delete volumesnapshot pvol0-snap2 -n ${NS} sleep 10 -kubectl get volumesnapshot -n helmtest-vxflexos +kubectl get volumesnapshot -n ${NS} diff --git a/test/helm/starttest.sh b/test/helm/starttest.sh index 63e1ed6c..91401084 100755 --- a/test/helm/starttest.sh +++ b/test/helm/starttest.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash [ "$1" = "" ] && { echo "requires test name as argument" exit 2 diff --git a/test/helm/stoptest.sh b/test/helm/stoptest.sh index c86b83d3..6517424b 100755 --- a/test/helm/stoptest.sh +++ b/test/helm/stoptest.sh @@ -1,4 +1,4 @@ -#/bin/sh +#!/bin/bash helm delete -n helmtest-vxflexos $1 sleep 10 kubectl get pods -n helmtest-vxflexos diff --git a/test/helm/unsupported/block1/templates/block-pvc.yaml b/test/helm/unsupported/block1/templates/block-pvc.yaml deleted file mode 100644 index 9f36aff2..00000000 --- a/test/helm/unsupported/block1/templates/block-pvc.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: 
PersistentVolumeClaim -metadata: - name: block-pvc - namespace: helmtest-vxflexos -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 8Gi - storageClassName: vxflexos - volumeMode: Block - #volumeName: block-pv diff --git a/test/helm/unsupported/block1/templates/test.yaml b/test/helm/unsupported/block1/templates/test.yaml deleted file mode 100644 index 85d22e1a..00000000 --- a/test/helm/unsupported/block1/templates/test.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: vxflextest - namespace: helmtest-vxflexos ---- -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: vxflextest - namespace: helmtest-vxflexos -spec: - selector: - matchLabels: - app: vxflextest - template: - metadata: - labels: - app: vxflextest - spec: - serviceAccount: vxflextest - containers: - - name: test - image: docker.io/centos:latest - command: [ "/bin/sleep", "3600" ] - deviceMounts: - - devicePath: "/data2" - name: block - volumes: - - name: block - persistentVolumeClaim: - claimName: block-pvc - diff --git a/test/helm/unsupported/block2/templates/block-pv.yaml b/test/helm/unsupported/block2/templates/block-pv.yaml deleted file mode 100644 index a9cbb9e7..00000000 --- a/test/helm/unsupported/block2/templates/block-pv.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: block-pv - namespace: helmtest-vxflexos -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 8Gi - csi: - driver: vxflexos.emc.dell.com - volumeHandle: 72cf800900000004 - persistentVolumeReclaimPolicy: Delete - storageClassName: vxflexos - volumeMode: Block diff --git a/test/helm/unsupported/block2/templates/block-pvc.yaml b/test/helm/unsupported/block2/templates/block-pvc.yaml deleted file mode 100644 index 9ae1c4bf..00000000 --- a/test/helm/unsupported/block2/templates/block-pvc.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: block-pvc - namespace: 
helmtest-vxflexos -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 8Gi - storageClassName: vxflexos - volumeMode: Block - volumeName: block-pv diff --git a/test/helm/unsupported/block2/templates/test.yaml b/test/helm/unsupported/block2/templates/test.yaml deleted file mode 100644 index b10d45b0..00000000 --- a/test/helm/unsupported/block2/templates/test.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: vxflextest - namespace: helmtest-vxflexos ---- -kind: StatefulSet -apiVersion: apps/v1 -metadata: - name: vxflextest - namespace: helmtest-vxflexos -spec: - serviceName: test - selector: - matchLabels: - app: vxflextest - template: - metadata: - labels: - app: vxflextest - spec: - serviceAccount: vxflextest - containers: - - name: test - image: docker.io/centos:latest - command: [ "/bin/sleep", "3600" ] - volumeDevices: - - devicePath: "/data2" - name: block - volumes: - - name: block - persistentVolumeClaim: - claimName: block-pvc - diff --git a/test/helm/volumesnapshotclass.yaml b/test/helm/volumesnapshotclass.yaml index b75948a2..6d168f1c 100644 --- a/test/helm/volumesnapshotclass.yaml +++ b/test/helm/volumesnapshotclass.yaml @@ -1,8 +1,5 @@ -apiVersion: snapshot.storage.k8s.io/v1alpha1 +apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshotClass metadata: - name: vxflexos-cgsnap - namespace: helmtest-vxflexos -snapshotter: csi-vxflexos.dellemc.com -parameters: - VolumeIDList: "__VOLUME_ID_LIST__" + name: vxflexos-snapclass +driver: csi-vxflexos.dellemc.com diff --git a/test/integration/features/integration.feature b/test/integration/features/integration.feature index 550ca141..a7e75ad5 100644 --- a/test/integration/features/integration.feature +++ b/test/integration/features/integration.feature @@ -40,6 +40,35 @@ Feature: VxFlex OS CSI interface And when I call DeleteVolume Then there are no errors +@long + Scenario Outline: Create volume, create snapshot, delete snapshot, delete 
volume for multiple sizes + Given a VxFlexOS service + And a capability with voltype "block" access "single-writer" fstype "xfs" + And a basic block volume request "integration1" + When I call CreateVolume + And when I call PublishVolume "SDC_GUID" + And when I call NodePublishVolume "SDC_GUID" + And verify published volume with voltype "block" access "single-writer" fstype "xfs" + And when I call NodePublishVolume "SDC_GUID" + And I write block data + And I call CreateSnapshot + And there are no errors + And I call DeleteSnapshot + And there are no errors + And when I call NodeUnpublishVolume "SDC_GUID" + And when I call UnpublishVolume "SDC_GUID" + And when I call DeleteVolume + And there are no errors + And when I call DeleteAllVolumes + And there are no errors + + Examples: + | size | + | "8" | + | "16" | + | "32" | + | "64" | + Scenario: Create volume, create snapshot, create volume from snapshot, delete original volume, delete new volume Given a VxFlexOS service And a basic block volume request "integration1" "8" @@ -106,6 +135,7 @@ Feature: VxFlex OS CSI interface And when I call DeleteAllVolumes And there are no errors +@xwip Scenario Outline: Create publish, node-publish, node-unpublish, unpublish, and delete basic volume Given a VxFlexOS service And a capability with voltype access fstype @@ -129,10 +159,7 @@ Feature: VxFlex OS CSI interface | "block" | "single-writer" | "none" | "none" | | "block" | "multi-writer" | "none" | "none" | | "block" | "single-writer" | "none" | "none" | - - - Scenario: Create volume with access mode read only many Given a VxFlexOS service And a capability with voltype "mount" access "single-writer" fstype "xfs" @@ -153,6 +180,27 @@ Feature: VxFlex OS CSI interface And when I call DeleteVolume Then there are no errors + Scenario: Create block volume with access mode read write many + Given a VxFlexOS service + And a capability with voltype "block" access "multi-writer" fstype "" + And a volume request "block-multi-writer-test" 
"8" + When I call CreateVolume + And there are no errors + And when I call PublishVolume "SDC_GUID" + And when I call PublishVolume "ALT_GUID" + And when I call NodePublishVolumeWithPoint "SDC_GUID" "/tmp/tempdev1" + And there are no errors + And when I call NodePublishVolumeWithPoint "SDC_GUID" "/tmp/tempdev2" + And there are no errors + And when I call NodePublishVolume "ALT_GUID" + And there are no errors + And when I call NodeUnpublishVolume "ALT_GUID" + And when I call NodeUnpublishVolumeWithPoint "SDC_GUID" "/tmp/tempdev1" + And when I call NodeUnpublishVolumeWithPoint "SDC_GUID" "/tmp/tempdev2" + And when I call UnpublishVolume "SDC_GUID" + And when I call UnpublishVolume "ALT_GUID" + And when I call DeleteVolume + Then there are no errors Scenario: Create publish, unpublish, and delete basic volume Given a VxFlexOS service @@ -219,11 +267,6 @@ Feature: VxFlex OS CSI interface | 1 | | 2 | | 5 | - | 10 | - | 20 | - | 50 | - | 100 | - | 200 | Scenario Outline: Idempotent create volumes, publish, node publish, node unpublish, unpublish, delete volumes in parallel Given a VxFlexOS service @@ -251,9 +294,63 @@ Feature: VxFlex OS CSI interface And there are no errors And when I delete volumes in parallel Then there are no errors - + Examples: + | numberOfVolumes | | 1 | + | 10 | + + + + Scenario: Expand Volume Mount + Given a VxFlexOS service + And a capability with voltype "mount" access "single-writer" fstype "xfs" + And a volume request "integration30" "16" + When I call CreateVolume + And there are no errors + And when I call PublishVolume "SDC_GUID" + And there are no errors + And when I call NodePublishVolume "SDC_GUID" + And there are no errors + And when I call ExpandVolume to "20" + And there are no errors + And when I call NodeExpandVolume + And there are no errors + And I call ListVolume + And a valid ListVolumeResponse is returned + And when I call NodeUnpublishVolume "SDC_GUID" + And there are no errors + And when I call UnpublishVolume "SDC_GUID" + 
And there are no errors + And when I call DeleteVolume + Then there are no errors + + + Scenario: Expand Volume Block + Given a VxFlexOS service + And a capability with voltype "block" access "single-writer" fstype "none" + And a volume request "integration33" "8" + When I call CreateVolume + And there are no errors + And when I call PublishVolume "SDC_GUID" + And there are no errors + And when I call NodePublishVolume "SDC_GUID" + And there are no errors + And when I call ExpandVolume to "10" + And there are no errors + And when I call NodeExpandVolume + And there are no errors + And I call ListVolume + And a valid ListVolumeResponse is returned + And when I call NodeUnpublishVolume "SDC_GUID" + And there are no errors + And when I call UnpublishVolume "SDC_GUID" + And there are no errors + And when I call DeleteVolume + Then there are no errors + + | numberOfVolumes | + | 1 | | 10 | | 20 | diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index e08a88c7..7dce8e6e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -55,7 +55,7 @@ func TestMain(m *testing.M) { }, godog.Options{ Format: "pretty", Paths: []string{"features"}, - // Tags: "wip", + //Tags: "wip", }) if st := m.Run(); st > exitVal { exitVal = st diff --git a/test/integration/run.sh b/test/integration/run.sh index e4670ca2..1c5a5007 100644 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -11,7 +11,7 @@ if [ $rc -ne 0 ]; then echo "failed http unauthorized test"; exit $rc; fi rm -f unix.sock source ../../env.sh echo $SDC_GUID -GOOS=linux CGO_ENABLED=0 GO111MODULE=on go test -v -coverprofile=c.linux.out -timeout 30m -coverpkg=github.com/dell/csi-vxflexos/service *test.go & +GOOS=linux CGO_ENABLED=0 GO111MODULE=on go test -v -coverprofile=c.linux.out -timeout 60m -coverpkg=github.com/dell/csi-vxflexos/service *test.go & if [ -f ./csi-sanity ] ; then sleep 5 ./csi-sanity --csi.endpoint=./unix_sock 
--csi.testvolumeparameters=./pool.yml --csi.testvolumesize 8589934592 diff --git a/test/integration/step_defs_test.go b/test/integration/step_defs_test.go index c96c66f2..4f22b8c4 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -33,6 +33,9 @@ type feature struct { snapshotID string volIDList []string maxRetryCount int + expandVolumeResponse *csi.ControllerExpandVolumeResponse + nodeExpandVolumeRequest *csi.NodeExpandVolumeRequest + nodeExpandVolumeResponse *csi.NodeExpandVolumeResponse } func (f *feature) addError(err error) { @@ -50,6 +53,8 @@ func (f *feature) aVxFlexOSService() error { f.snapshotID = "" f.volIDList = f.volIDList[:0] f.maxRetryCount = MaxRetries + f.expandVolumeResponse = nil + f.nodeExpandVolumeResponse = nil return nil } @@ -342,15 +347,19 @@ func (f *feature) getNodePublishVolumeRequest() *csi.NodePublishVolumeRequest { } func (f *feature) whenICallNodePublishVolumeWithPoint(arg1 string, arg2 string) error { - _, err := os.Stat(arg2) - if err != nil && os.IsNotExist(err) { - err = os.Mkdir(arg2, 0777) - if err != nil { - return err - } + block := f.capability.GetBlock() + if block != nil { + } else { + _, err := os.Stat(arg2) + if err != nil && os.IsNotExist(err) { + err = os.Mkdir(arg2, 0777) + if err != nil { + return err + } + } } - err = f.nodePublishVolume(f.volID, arg2) + err := f.nodePublishVolume(f.volID, arg2) if err != nil { fmt.Printf("NodePublishVolume failed: %s\n", err.Error()) f.addError(err) @@ -405,7 +414,6 @@ func (f *feature) whenICallNodeUnpublishVolume(arg1 string) error { } func (f *feature) whenICallNodeUnpublishVolumeWithPoint(arg1, arg2 string) error { - err := f.nodeUnpublishVolume(f.volID, arg2) if err != nil { fmt.Printf("NodeUnpublishVolume failed: %s\n", err.Error()) @@ -918,6 +926,111 @@ func (f *feature) whenIDeleteVolumesInParallel(nVols int) error { return nil } +// Writes a fixed pattern of block data (0x57 bytes) in 1 MB chunks to raw block mounted at 
/tmp/datafile. +// Used to make sure the data has changed when taking a snapshot +func (f *feature) iWriteBlockData() error { + buf := make([]byte, 1024*1024) + for i := 0; i < 1024*1024; i++ { + buf[i] = 0x57 + } + fp, err := os.OpenFile("/tmp/datafile", os.O_RDWR, 0666) + if err != nil { + return nil + } + var nrecords int + for err == nil { + var n int + n, err = fp.Write(buf) + if n == len(buf) { + nrecords++ + } + if (nrecords % 256) == 0 { + fmt.Printf("%d records\r", nrecords) + } + } + fp.Close() + fmt.Printf("\rWrote %d MB\n", nrecords) + return nil + +} + +func (f *feature) whenICallExpandVolumeTo(size int64) error { + + err := f.controllerExpandVolume(f.volID, size) + if err != nil { + fmt.Printf("ControllerExpandVolume %s:\n", err.Error()) + f.addError(err) + } else { + fmt.Printf("ControllerExpandVolume completed successfully\n") + } + time.Sleep(SleepTime) + return nil +} + +func (f *feature) controllerExpandVolume(volID string, size int64) error { + + const bytesInKiB = 1024 + var resp *csi.ControllerExpandVolumeResponse + var err error + req := &csi.ControllerExpandVolumeRequest{ + VolumeId: volID, + CapacityRange: &csi.CapacityRange{RequiredBytes: size * bytesInKiB * bytesInKiB * bytesInKiB}, + } + ctx := context.Background() + client := csi.NewControllerClient(grpcClient) + for i := 0; i < f.maxRetryCount; i++ { + resp, err = client.ControllerExpandVolume(ctx, req) + if err == nil { + break + } + fmt.Printf("Controller ExpandVolume retry: %s\n", err.Error()) + time.Sleep(RetrySleepTime) + } + f.expandVolumeResponse = resp + return err +} + +func (f *feature) whenICallNodeExpandVolume() error { + + nodePublishReq := f.nodePublishVolumeRequest + if nodePublishReq == nil { + err := fmt.Errorf("Volume is not stage, nodePublishVolumeRequest not found") + return err + } + err := f.nodeExpandVolume(f.volID, nodePublishReq.TargetPath) + if err != nil { + fmt.Printf("NodeExpandVolume %s:\n", err.Error()) + f.addError(err) + } else { + 
fmt.Printf("NodeExpandVolume completed successfully\n") + } + time.Sleep(SleepTime) + return nil + +} + +func (f *feature) nodeExpandVolume(volID, volPath string) error { + var resp *csi.NodeExpandVolumeResponse + var err error + req := &csi.NodeExpandVolumeRequest{ + VolumeId: volID, + VolumePath: volPath, + } + ctx := context.Background() + client := csi.NewNodeClient(grpcClient) + // Retry loop to deal with API being overwhelmed + for i := 0; i < f.maxRetryCount; i++ { + resp, err = client.NodeExpandVolume(ctx, req) + if err == nil { + break + } + fmt.Printf("Node ExpandVolume retry: %s\n", err.Error()) + time.Sleep(RetrySleepTime) + } + f.nodeExpandVolumeResponse = resp + return err +} + func FeatureContext(s *godog.Suite) { f := &feature{} s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) @@ -956,4 +1069,7 @@ func FeatureContext(s *godog.Suite) { s.Step(`^I node unpublish (\d+) volumes in parallel$`, f.iNodeUnpublishVolumesInParallel) s.Step(`^I unpublish (\d+) volumes in parallel$`, f.iUnpublishVolumesInParallel) s.Step(`^when I delete (\d+) volumes in parallel$`, f.whenIDeleteVolumesInParallel) + s.Step(`^I write block data$`, f.iWriteBlockData) + s.Step(`^when I call ExpandVolume to "([^"]*)"$`, f.whenICallExpandVolumeTo) + s.Step(`^when I call NodeExpandVolume$`, f.whenICallNodeExpandVolume) } diff --git a/test/sanity/run.sh b/test/sanity/run.sh index 0a888e7d..d3391185 100644 --- a/test/sanity/run.sh +++ b/test/sanity/run.sh @@ -1,4 +1,4 @@ #!/bin/sh rm -rf /tmp/csi-staging rm -rf /tmp/csi-mount -csi-sanity --ginkgo.v --csi.endpoint=/root/csi-vxflexos/test/sanity/unix_sock --csi.testvolumesize 17179869184 --csi.testvolumeparameters=volParams.yaml --csi.secrets=secrets.yaml --ginkgo.skip "pagination should detect volumes added between pages and accept tokens when the last volume from a page is deleted|check the presence of new volumes and absence of deleted ones in the volume list|should fail when the volume is missing" +csi-sanity --ginkgo.v 
--csi.endpoint=/root/csi-vxflexos/test/sanity/unix_sock --csi.testvolumeexpandsize 25769803776 --csi.testvolumesize 17179869184 --csi.testvolumeparameters=volParams.yaml --csi.secrets=secrets.yaml --ginkgo.skip "pagination should detect volumes added between pages and accept tokens when the last volume from a page is deleted|check the presence of new volumes and absence of deleted ones in the volume list|should fail when the volume is missing"