From dce91cc62a043a34a53bef9bf63e7b1b771e0314 Mon Sep 17 00:00:00 2001
From: Serguei Bezverkhi
Date: Wed, 14 Nov 2018 21:32:26 -0500
Subject: [PATCH] second part of vendor files

Signed-off-by: Serguei Bezverkhi
---
 Gopkg.lock | 6 +-
 .../kubernetes-csi/csi-test/.gitignore | 11 +-
 .../kubernetes-csi/csi-test/.travis.yml | 19 +-
 .../kubernetes-csi/csi-test/CONTRIBUTING.md | 22 +
 .../kubernetes-csi/csi-test/Dockerfile.mock | 6 +
 .../kubernetes-csi/csi-test/Gopkg.lock | 237 ++
 .../kubernetes-csi/csi-test/Gopkg.toml | 62 +
 .../kubernetes-csi/csi-test/Makefile | 52 +
 .../github.com/kubernetes-csi/csi-test/OWNERS | 4 +
 .../kubernetes-csi/csi-test/README.md | 33 +-
 .../kubernetes-csi/csi-test/SECURITY_CONTACTS | 14 +
 .../csi-test/cmd/csi-sanity/Makefile | 14 +-
 .../csi-test/cmd/csi-sanity/README.md | 30 +
 .../csi-test/cmd/csi-sanity/sanity_test.go | 18 +-
 .../kubernetes-csi/csi-test/driver/driver.go | 250 ++-
 .../csi-test/driver/driver.mock.go | 145 +-
 .../kubernetes-csi/csi-test/driver/mock.go | 83 +
 .../kubernetes-csi/csi-test/glide.lock | 135 --
 .../kubernetes-csi/csi-test/glide.yaml | 16 -
 .../csi-test/hack/_apitest/api_test.go | 18 +
 .../csi-test/hack/_embedded/embedded_test.go | 42 +
 .../kubernetes-csi/csi-test/hack/e2e.sh | 72 +-
 .../kubernetes-csi/csi-test/mock/AUTHORS | 2 +
 .../kubernetes-csi/csi-test/mock/README.md | 22 +
 .../csi-test/mock/cache/SnapshotCache.go | 89 +
 .../kubernetes-csi/csi-test/mock/main.go | 95 +
 .../csi-test/mock/mocksecret.yaml | 16 +
 .../csi-test/mock/service/controller.go | 577 +++++
 .../csi-test/mock/service/identity.go | 48 +
 .../csi-test/mock/service/node.go | 244 ++
 .../csi-test/mock/service/service.go | 147 ++
 .../csi-test/pkg/sanity/README.md | 54 +-
 .../csi-test/pkg/sanity/cleanup.go | 134 ++
 .../csi-test/pkg/sanity/controller.go | 1994 ++++++++++++----
 .../csi-test/pkg/sanity/identity.go | 113 +-
 .../csi-test/pkg/sanity/node.go | 688 +++---
 .../csi-test/pkg/sanity/sanity.go | 173 +-
 .../csi-test/pkg/sanity/tests.go | 56 +
 .../kubernetes-csi/csi-test/test/co_test.go | 105 +-
 .../csi-test/test/driver_test.go | 18 +-
 .../csi-test/utils/safegoroutinetester.go | 2 +-
 41 files changed, 4576 insertions(+), 1290 deletions(-)
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/Makefile
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/OWNERS
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/driver/mock.go
 delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.lock
 delete mode 100644 vendor/github.com/kubernetes-csi/csi-test/glide.yaml
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/README.md
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/main.go
 create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml
 create mode 100644
vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go create mode 100644 vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go diff --git a/Gopkg.lock b/Gopkg.lock index d2150de1..cbd68e0f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -42,15 +42,15 @@ version = "v1.1.0" [[projects]] - digest = "1:d17a296973591f13eed8399016d5ec748cfb9c92086a84e00b33139b92ba4858" + branch = "master" + digest = "1:79848f850d0d15b2413a7285441a7fd13ed8a901ebe57d59f1e49759c103b930" name = "github.com/kubernetes-csi/csi-test" packages = [ "driver", "utils", ] pruneopts = "" - revision = "31b2baed861ae5c166922808be5d1982346adf7d" - version = "v0.1.0-2" + revision = "619da6853e10bef67ddcc8f1c2b68b73154bf11d" [[projects]] branch = "master" diff --git a/vendor/github.com/kubernetes-csi/csi-test/.gitignore b/vendor/github.com/kubernetes-csi/csi-test/.gitignore index fc211aec..81c985c4 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.gitignore +++ b/vendor/github.com/kubernetes-csi/csi-test/.gitignore @@ -9,8 +9,11 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ -vendor/ +bin/mock cmd/csi-sanity/csi-sanity + +# JetBrains GoLand +.idea + +# Vim +*.swp diff --git a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml index 349982d2..7a817191 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/.travis.yml +++ b/vendor/github.com/kubernetes-csi/csi-test/.travis.yml @@ -1,12 +1,15 @@ language: go -install: - - curl https://glide.sh/get | sh - - glide install -v +sudo: required +services: + - docker matrix: include: - - go: 1.9.2 + - go: 1.10.3 script: -- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 -- go vet $(go list ./... | grep -v vendor) -- go test $(go list ./... | grep -v vendor | grep -v "cmd/csi-sanity") -- ./hack/e2e.sh +- make test +after_success: + - if [ "${TRAVIS_BRANCH}" == "master" ] && [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + make container + docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; + make push; + fi diff --git a/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md new file mode 100644 index 00000000..41b73b76 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) diff --git a/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock b/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock new file mode 100644 index 00000000..72697712 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Dockerfile.mock @@ -0,0 +1,6 @@ +FROM alpine +LABEL maintainers="Kubernetes Authors" +LABEL description="CSI Mock Driver" + +COPY ./bin/mock mock +ENTRYPOINT ["/mock"] diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock new file mode 100644 index 00000000..443ad970 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.lock @@ -0,0 +1,237 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:26ee2356254e58b9872ba736f66aff1c54a26f08c7d16afbf49695131a87d454" + name = "github.com/container-storage-interface/spec" + packages = ["lib/go/csi"] + pruneopts = "UT" + revision = "8efcc85c45550571fba8134182013ed7dc34038a" + version = "v1.0.0-rc2" + +[[projects]] + digest = "1:bc38c7c481812e178d85160472e231c5e1c9a7f5845d67e23ee4e706933c10d8" + name = "github.com/golang/mock" + packages = ["gomock"] + pruneopts = "UT" + revision = "c34cdb4725f4c3844d095133c6e40e448b86589b" + version = "v1.1.1" + +[[projects]] + digest = "1:588beb9f80d2b0afddf05663b32d01c867da419458b560471d81cca0286e76b8" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:72f35d3e412bc67b121e15ea4c88a3b3da8bcbc2264339e7ffa4a1865799840c" + name = "github.com/onsi/ginkgo" + packages = [ + ".", + "config", + "internal/codelocation", + "internal/containernode", + "internal/failer", + "internal/leafnodes", + "internal/remote", + "internal/spec", + "internal/spec_iterator", + "internal/specrunner", + "internal/suite", + "internal/testingtproxy", + "internal/writer", + "reporters", + "reporters/stenographer", + "reporters/stenographer/support/go-colorable", + "reporters/stenographer/support/go-isatty", + "types", + ] + pruneopts = "UT" + revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf" + version = "v1.5.0" + +[[projects]] + digest = "1:d0c2c4e2d0006cd28c220a549cda1de8e67abc65ed4c572421492bbf0492ceaf" + name = "github.com/onsi/gomega" + packages = [ + ".", + "format", + "internal/assertion", + "internal/asyncassertion", + "internal/oraclematcher", + "internal/testingtsupport", + "matchers", + "matchers/support/goraph/bipartitegraph", + "matchers/support/goraph/edge", + "matchers/support/goraph/node", + "matchers/support/goraph/util", + "types", + ] + pruneopts = "UT" + revision = "62bff4df71bdbc266561a0caee19f0594b17c240" + version = "v1.4.0" + +[[projects]] + digest = "1:9e9193aa51197513b3abcb108970d831fbcf40ef96aa845c4f03276e1fa316d2" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "UT" + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + +[[projects]] + branch = "master" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "UT" + revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" + +[[projects]] + branch = "master" + digest = "1:0bb2e6ef036484991ed446a6c698698b8901766981d4d22cc8e53fedb09709ac" + name = "golang.org/x/net" + packages = [ + "context", + "html", + "html/atom", + "html/charset", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1e491301e022f8f977054da4c2d852decd59571f" + +[[projects]] + branch = "master" + digest = "1:8fbfc6ea1a8a078697633be97f07dd83a83d32a96959d42195464c13c25be374" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "UT" + revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" + +[[projects]] + digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + 
"encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "internal/utf8internal", + "language", + "runes", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:601e63e7d4577f907118bec825902505291918859d223bce015539e79f1160e3" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "32ee49c4dd805befd833990acba36cb75042378c" + +[[projects]] + digest = "1:7a977fdcd5abff03e94f92e7b374ef37e91c7c389581e5c4348fa98616e6c6be" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "channelz", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "reflection", + "reflection/grpc_reflection_v1alpha", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport", + ] + pruneopts = "UT" + revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" + version = "v1.12.2" + +[[projects]] + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/container-storage-interface/spec/lib/go/csi", + "github.com/golang/mock/gomock", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes", + "github.com/golang/protobuf/ptypes/wrappers", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/sirupsen/logrus", + "golang.org/x/net/context", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/connectivity", + "google.golang.org/grpc/reflection", + "google.golang.org/grpc/status", + "gopkg.in/yaml.v2", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml new file mode 100644 index 00000000..4e0836d0 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Gopkg.toml @@ -0,0 +1,62 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/container-storage-interface/spec" + version = "v1.0.0-rc2" + +[[constraint]] + name = "github.com/golang/mock" + version = "1.0.0" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "v1.2.0" + +[[constraint]] + name = "github.com/onsi/ginkgo" + version = "1.4.0" + +[[constraint]] + name = "github.com/onsi/gomega" + version = "1.3.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.9.2" + +[[constraint]] + name = "gopkg.in/yaml.v2" + version = "v2.1.1" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/github.com/kubernetes-csi/csi-test/Makefile b/vendor/github.com/kubernetes-csi/csi-test/Makefile new file mode 100644 index 00000000..7fb42c87 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/Makefile @@ -0,0 +1,52 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +IMAGE_NAME = quay.io/k8scsi/mock-driver +IMAGE_VERSION = canary +APP := ./bin/mock + + +ifdef V +TESTARGS = -v -args -alsologtostderr -v 5 +else +TESTARGS = +endif + +all: $(APP) + +$(APP): + mkdir -p bin + CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o $(APP) ./mock/main.go + +clean: + rm -rf bin + +container: $(APP) + docker build -f Dockerfile.mock -t $(IMAGE_NAME):$(IMAGE_VERSION) . + +push: container + docker push $(IMAGE_NAME):$(IMAGE_VERSION) + +test: $(APP) + files=$$(find ./ -name '*.go' | grep -v '^./vendor' ); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi + go vet $$(go list ./... | grep -v vendor) + go test $$(go list ./... 
| grep -v vendor | grep -v "cmd/csi-sanity") + ./hack/e2e.sh + +.PHONY: all clean container push test diff --git a/vendor/github.com/kubernetes-csi/csi-test/OWNERS b/vendor/github.com/kubernetes-csi/csi-test/OWNERS new file mode 100644 index 00000000..a780cce6 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/OWNERS @@ -0,0 +1,4 @@ +approvers: +- saad-ali +- lpabon +- pohly diff --git a/vendor/github.com/kubernetes-csi/csi-test/README.md b/vendor/github.com/kubernetes-csi/csi-test/README.md index e21d8399..36dce60b 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/README.md @@ -1,15 +1,42 @@ [![Build Status](https://travis-ci.org/kubernetes-csi/csi-test.svg?branch=master)](https://travis-ci.org/kubernetes-csi/csi-test) +[![Docker Repository on Quay](https://quay.io/repository/k8scsi/mock-driver/status "Docker Repository on +Quay")](https://quay.io/repository/k8scsi/mock-driver) + # csi-test csi-test houses packages and libraries to help test CSI client and plugins. -## For Container Orchestration Unit Tests +## For Container Orchestration Tests CO developers can use this framework to create drivers based on the [Golang mock](https://github.com/golang/mock) framework. Please see [co_test.go](test/co_test.go) for an example. -## For CSI Driver Unit Tests -To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) +### Mock driver for testing +We also provide a container called `quay.io/k8scsi/mock-driver:canary` which can be used as an in-memory mock driver. +It follows the same release cycle as other containers, so the latest release is `quay.io/k8scsi/mock-driver:v0.3.0`. + +You will need to setup the environment variable `CSI_ENDPOINT` for the mock driver to know where to create the unix +domain socket. + +## For CSI Driver Tests +To test drivers please take a look at [pkg/sanity](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity). +This package and [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) are meant to test +the CSI API capability of a driver. They are meant to be an additional test to the unit, functional, and e2e tests of a +CSI driver. ### Note +* Master is for CSI v0.4.0. Please see the branches for other CSI releases. * Only Golang 1.9+ supported. See [gRPC issue](https://github.com/grpc/grpc-go/issues/711#issuecomment-326626790) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack channel](https://kubernetes.slack.com/messages/sig-storage) +- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-storage) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS new file mode 100644 index 00000000..00e28e4e --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. 
+# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +saad-ali +lpabon diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile index b0ecbeac..520c2153 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/Makefile @@ -1,5 +1,6 @@ APP_NAME := csi-sanity VER :=$(shell git describe) +RELEASEVER := $(shell git describe --abbrev=0) BRANCH := $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) SHA := $(shell git rev-parse --short HEAD) ARCH := $(shell go env GOARCH) @@ -17,7 +18,7 @@ endif endif LDFLAGS :=-ldflags "-w -X github.com/kubernetes-csi/csi-test/cmd/csi-sanity.VERSION=$(VERSION) -extldflags '-z relro -z now'" -PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(VERSION).$(GOOS).$(ARCH).tar.gz +PACKAGE :=$(DIR)/dist/$(APP_NAME)-$(RELEASEVER).$(GOOS).$(ARCH).tar.gz all: $(APP_NAME) @@ -29,9 +30,11 @@ install: $(APP_NAME) clean: rm -f csi-sanity + +dist-clean: rm -rf $(DIR)/dist -dist: $(PACKAGE) +dist: clean $(PACKAGE) $(PACKAGE): $(APP_NAME) @echo Packaging Binaries... @@ -46,16 +49,13 @@ $(PACKAGE): $(APP_NAME) linux_amd64_dist: GOOS=linux GOARCH=amd64 $(MAKE) dist -linux_arm_dist: - GOOS=linux GOARCH=arm $(MAKE) dist - linux_arm64_dist: GOOS=linux GOARCH=arm64 $(MAKE) dist darwin_amd64_dist: GOOS=darwin GOARCH=amd64 $(MAKE) dist -release: darwin_amd64_dist linux_arm_dist linux_amd64_dist linux_arm64_dist +release: dist-clean darwin_amd64_dist linux_amd64_dist linux_arm64_dist .PHONY: release darwin_amd64_dist linux_arm64_dist linux_amd64_dist \ - linux_arm_dist linux_amd64_dist clean + linux_arm_dist linux_amd64_dist clean dist-clean diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md index 36c282ad..dade1018 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/README.md @@ -7,12 +7,42 @@ Example: $ csi-sanity --csi.endpoint= ``` +If you want to specify a mount point: + +``` +$ csi-sanity --csi.endpoint= --csi.mountpoint=/mnt +``` + For verbose type: ``` $ csi-sanity --ginkgo.v --csi.endpoint= ``` +For csi-credentials, create a secrets file with all the secrets in it: +```yaml +CreateVolumeSecret: + secretKey: secretval1 +DeleteVolumeSecret: + secretKey: secretval2 +ControllerPublishVolumeSecret: + secretKey: secretval3 +ControllerUnpublishVolumeSecret: + secretKey: secretval4 +NodeStageVolumeSecret: + secretKey: secretval5 +NodePublishVolumeSecret: + secretKey: secretval6 +``` + +Pass the file path to csi-sanity as: +``` +$ csi-sanity --csi.endpoint= --csi.secrets= +``` + +Replace the keys and values of the credentials appropriately. Since the whole +secret is passed in the request, multiple key-val pairs can be used. + ### Help The full Ginkgo and golang unit test parameters are available. 
Type diff --git a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go index 88793f96..4b2d352c 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/sanity_test.go @@ -18,6 +18,7 @@ package sanity import ( "flag" "fmt" + "os" "testing" "github.com/kubernetes-csi/csi-test/pkg/sanity" @@ -28,14 +29,19 @@ const ( ) var ( - VERSION = "(dev)" - endpoint string - version bool + VERSION = "(dev)" + version bool + config sanity.Config ) func init() { - flag.StringVar(&endpoint, prefix+"endpoint", "", "CSI endpoint") + flag.StringVar(&config.Address, prefix+"endpoint", "", "CSI endpoint") flag.BoolVar(&version, prefix+"version", false, "Version of this program") + flag.StringVar(&config.TargetPath, prefix+"mountdir", os.TempDir()+"/csi", "Mount point for NodePublish") + flag.StringVar(&config.StagingPath, prefix+"stagingdir", os.TempDir()+"/csi", "Mount point for NodeStage if staging is supported") + flag.StringVar(&config.SecretsFile, prefix+"secrets", "", "CSI secrets file") + flag.Int64Var(&config.TestVolumeSize, prefix+"testvolumesize", sanity.DefTestVolumeSize, "Base volume size used for provisioned volumes") + flag.StringVar(&config.TestVolumeParametersFile, prefix+"testvolumeparameters", "", "YAML file of volume parameters for provisioned volumes") flag.Parse() } @@ -44,8 +50,8 @@ func TestSanity(t *testing.T) { fmt.Printf("Version = %s\n", VERSION) return } - if len(endpoint) == 0 { + if len(config.Address) == 0 { t.Fatalf("--%sendpoint must be provided with an CSI endpoint", prefix) } - sanity.Test(t, endpoint) + sanity.Test(t, &config) } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go index 94145df2..01224a3a 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.go @@ -19,122 +19,258 @@ limitations under the License. package driver import ( + "context" + "encoding/json" + "errors" + "fmt" "net" "sync" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/kubernetes-csi/csi-test/utils" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ) -type MockCSIDriverServers struct { - Controller *MockControllerServer - Identity *MockIdentityServer - Node *MockNodeServer +var ( + // ErrNoCredentials is the error when a secret is enabled but not passed in the request. + ErrNoCredentials = errors.New("secret must be provided") + // ErrAuthFailed is the error when the secret is incorrect. + ErrAuthFailed = errors.New("authentication failed") +) + +type CSIDriverServers struct { + Controller csi.ControllerServer + Identity csi.IdentityServer + Node csi.NodeServer +} + +// This is the key name in all the CSI secret objects. +const secretField = "secretKey" + +// CSICreds is a driver specific secret type. Drivers can have a key-val pair of +// secrets. This mock driver has a single string secret with secretField as the +// key. 
+type CSICreds struct { + CreateVolumeSecret string + DeleteVolumeSecret string + ControllerPublishVolumeSecret string + ControllerUnpublishVolumeSecret string + NodeStageVolumeSecret string + NodePublishVolumeSecret string + CreateSnapshotSecret string + DeleteSnapshotSecret string } -type MockCSIDriver struct { +type CSIDriver struct { listener net.Listener server *grpc.Server - conn *grpc.ClientConn - servers *MockCSIDriverServers + servers *CSIDriverServers wg sync.WaitGroup running bool lock sync.Mutex + creds *CSICreds } -func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { - return &MockCSIDriver{ +func NewCSIDriver(servers *CSIDriverServers) *CSIDriver { + return &CSIDriver{ servers: servers, } } -func (m *MockCSIDriver) goServe(started chan<- bool) { - m.wg.Add(1) +func (c *CSIDriver) goServe(started chan<- bool) { + c.wg.Add(1) go func() { - defer m.wg.Done() + defer c.wg.Done() started <- true - err := m.server.Serve(m.listener) + err := c.server.Serve(c.listener) if err != nil { panic(err.Error()) } }() } -func (m *MockCSIDriver) Address() string { - return m.listener.Addr().String() +func (c *CSIDriver) Address() string { + return c.listener.Addr().String() } -func (m *MockCSIDriver) Start() error { - m.lock.Lock() - defer m.lock.Unlock() +func (c *CSIDriver) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() - // Listen on a port assigned by the net package - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return err - } - m.listener = l + // Set listener + c.listener = l // Create a new grpc server - m.server = grpc.NewServer() + c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) // Register Mock servers - if m.servers.Controller != nil { - csi.RegisterControllerServer(m.server, m.servers.Controller) + if c.servers.Controller != nil { + csi.RegisterControllerServer(c.server, c.servers.Controller) } - if m.servers.Identity != nil { - csi.RegisterIdentityServer(m.server, m.servers.Identity) + if c.servers.Identity != nil { + csi.RegisterIdentityServer(c.server, c.servers.Identity) } - if m.servers.Node != nil { - csi.RegisterNodeServer(m.server, m.servers.Node) + if c.servers.Node != nil { + csi.RegisterNodeServer(c.server, c.servers.Node) } - reflection.Register(m.server) + reflection.Register(c.server) // Start listening for requests waitForServer := make(chan bool) - m.goServe(waitForServer) + c.goServe(waitForServer) <-waitForServer - m.running = true + c.running = true return nil } -func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { - // Start server - err := m.Start() - if err != nil { - return nil, err +func (c *CSIDriver) Stop() { + c.lock.Lock() + defer c.lock.Unlock() + + if !c.running { + return } - // Create a client connection - m.conn, err = utils.Connect(m.Address()) + c.server.Stop() + c.wg.Wait() +} + +func (c *CSIDriver) Close() { + c.server.Stop() +} + +func (c *CSIDriver) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +// SetDefaultCreds sets the default secrets for CSI creds. 
+func (c *CSIDriver) SetDefaultCreds() { + c.creds = &CSICreds{ + CreateVolumeSecret: "secretval1", + DeleteVolumeSecret: "secretval2", + ControllerPublishVolumeSecret: "secretval3", + ControllerUnpublishVolumeSecret: "secretval4", + NodeStageVolumeSecret: "secretval5", + NodePublishVolumeSecret: "secretval6", + CreateSnapshotSecret: "secretval7", + DeleteSnapshotSecret: "secretval8", + } +} + +func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + err := c.authInterceptor(req) if err != nil { + logGRPC(info.FullMethod, req, nil, err) return nil, err } + rsp, err := handler(ctx, req) + logGRPC(info.FullMethod, req, rsp, err) + return rsp, err +} - return m.conn, nil +func (c *CSIDriver) authInterceptor(req interface{}) error { + if c.creds != nil { + authenticated, authErr := isAuthenticated(req, c.creds) + if !authenticated { + if authErr == ErrNoCredentials { + return status.Error(codes.InvalidArgument, authErr.Error()) + } + if authErr == ErrAuthFailed { + return status.Error(codes.Unauthenticated, authErr.Error()) + } + } + } + return nil } -func (m *MockCSIDriver) Stop() { - m.lock.Lock() - defer m.lock.Unlock() +func logGRPC(method string, request, reply interface{}, err error) { + // Log JSON with the request and response for easier parsing + logMessage := struct { + Method string + Request interface{} + Response interface{} + Error string + }{ + Method: method, + Request: request, + Response: reply, + } + if err != nil { + logMessage.Error = err.Error() + } + msg, _ := json.Marshal(logMessage) + fmt.Printf("gRPCCall: %s\n", msg) +} - if !m.running { - return +func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) { + switch r := req.(type) { + case *csi.CreateVolumeRequest: + return authenticateCreateVolume(r, creds) + case *csi.DeleteVolumeRequest: + return authenticateDeleteVolume(r, creds) + case *csi.ControllerPublishVolumeRequest: + return authenticateControllerPublishVolume(r, creds) + case *csi.ControllerUnpublishVolumeRequest: + return authenticateControllerUnpublishVolume(r, creds) + case *csi.NodeStageVolumeRequest: + return authenticateNodeStageVolume(r, creds) + case *csi.NodePublishVolumeRequest: + return authenticateNodePublishVolume(r, creds) + case *csi.CreateSnapshotRequest: + return authenticateCreateSnapshot(r, creds) + case *csi.DeleteSnapshotRequest: + return authenticateDeleteSnapshot(r, creds) + default: + return true, nil } +} - m.server.Stop() - m.wg.Wait() +func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateVolumeSecret) } -func (m *MockCSIDriver) Close() { - m.conn.Close() - m.server.Stop() +func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteVolumeSecret) } -func (m *MockCSIDriver) IsRunning() bool { - m.lock.Lock() - defer m.lock.Unlock() +func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerPublishVolumeSecret) +} + +func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerUnpublishVolumeSecret) +} - return m.running +func authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { + return 
credsCheck(req.GetSecrets(), creds.NodeStageVolumeSecret) +} + +func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.NodePublishVolumeSecret) +} + +func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateSnapshotSecret) +} + +func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret) +} + +func credsCheck(secrets map[string]string, secretVal string) (bool, error) { + if len(secrets) == 0 { + return false, ErrNoCredentials + } + + if secrets[secretField] != secretVal { + return false, ErrAuthFailed + } + return true, nil } diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go index ed14e019..c54acaad 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go @@ -34,6 +34,19 @@ func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { return m.recorder } +// GetPluginCapabilities mocks base method +func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginCapabilities indicates an expected call of GetPluginCapabilities +func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) +} + // GetPluginInfo mocks base method func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) @@ -47,17 +60,17 @@ func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) } -// GetSupportedVersions mocks base method -func (m *MockIdentityServer) GetSupportedVersions(arg0 context.Context, arg1 *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { - ret := m.ctrl.Call(m, "GetSupportedVersions", arg0, arg1) - ret0, _ := ret[0].(*csi.GetSupportedVersionsResponse) +// Probe mocks base method +func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *csi.ProbeRequest) (*csi.ProbeResponse, error) { + ret := m.ctrl.Call(m, "Probe", arg0, arg1) + ret0, _ := ret[0].(*csi.ProbeResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetSupportedVersions indicates an expected call of GetSupportedVersions -func (mr *MockIdentityServerMockRecorder) GetSupportedVersions(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSupportedVersions", reflect.TypeOf((*MockIdentityServer)(nil).GetSupportedVersions), arg0, arg1) +// Probe indicates an expected call of Probe +func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", 
reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) } // MockControllerServer is a mock of ControllerServer interface @@ -96,19 +109,6 @@ func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) } -// ControllerProbe mocks base method -func (m *MockControllerServer) ControllerProbe(arg0 context.Context, arg1 *csi.ControllerProbeRequest) (*csi.ControllerProbeResponse, error) { - ret := m.ctrl.Call(m, "ControllerProbe", arg0, arg1) - ret0, _ := ret[0].(*csi.ControllerProbeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ControllerProbe indicates an expected call of ControllerProbe -func (mr *MockControllerServerMockRecorder) ControllerProbe(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerProbe", reflect.TypeOf((*MockControllerServer)(nil).ControllerProbe), arg0, arg1) -} - // ControllerPublishVolume mocks base method func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) @@ -135,6 +135,19 @@ func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) } +// CreateSnapshot mocks base method +func (m *MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSnapshot indicates an expected call of CreateSnapshot +func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) +} + // CreateVolume mocks base method func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) @@ -148,6 +161,19 @@ func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) } +// DeleteSnapshot mocks base method +func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSnapshot indicates an expected call of DeleteSnapshot +func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) +} + // DeleteVolume mocks base method func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { ret := 
m.ctrl.Call(m, "DeleteVolume", arg0, arg1) @@ -174,6 +200,19 @@ func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) } +// ListSnapshots mocks base method +func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) + ret0, _ := ret[0].(*csi.ListSnapshotsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSnapshots indicates an expected call of ListSnapshots +func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) +} + // ListVolumes mocks base method func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) @@ -223,19 +262,6 @@ func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { return m.recorder } -// GetNodeID mocks base method -func (m *MockNodeServer) GetNodeID(arg0 context.Context, arg1 *csi.GetNodeIDRequest) (*csi.GetNodeIDResponse, error) { - ret := m.ctrl.Call(m, "GetNodeID", arg0, arg1) - ret0, _ := ret[0].(*csi.GetNodeIDResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNodeID indicates an expected call of GetNodeID -func (mr *MockNodeServerMockRecorder) GetNodeID(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockNodeServer)(nil).GetNodeID), arg0, arg1) -} - // NodeGetCapabilities mocks base method func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) @@ -249,17 +275,30 @@ func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) } -// NodeProbe mocks base method -func (m *MockNodeServer) NodeProbe(arg0 context.Context, arg1 *csi.NodeProbeRequest) (*csi.NodeProbeResponse, error) { - ret := m.ctrl.Call(m, "NodeProbe", arg0, arg1) - ret0, _ := ret[0].(*csi.NodeProbeResponse) +// NodeGetInfo mocks base method +func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetInfoResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// NodeProbe indicates an expected call of NodeProbe -func (mr *MockNodeServerMockRecorder) NodeProbe(arg0, arg1 interface{}) *gomock.Call { - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeProbe", reflect.TypeOf((*MockNodeServer)(nil).NodeProbe), arg0, arg1) +// NodeGetInfo indicates an expected call of NodeGetInfo +func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) +} + +// NodeGetVolumeStats mocks base method +func (m *MockNodeServer) NodeGetVolumeStats(arg0 context.Context, arg1 
*csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + ret := m.ctrl.Call(m, "NodeGetVolumeStats", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetVolumeStatsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetVolumeStats indicates an expected call of NodeGetVolumeStats +func (mr *MockNodeServerMockRecorder) NodeGetVolumeStats(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetVolumeStats", reflect.TypeOf((*MockNodeServer)(nil).NodeGetVolumeStats), arg0, arg1) } // NodePublishVolume mocks base method @@ -275,6 +314,19 @@ func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) } +// NodeStageVolume mocks base method +func (m *MockNodeServer) NodeStageVolume(arg0 context.Context, arg1 *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeStageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStageVolume indicates an expected call of NodeStageVolume +func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) +} + // NodeUnpublishVolume mocks base method func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) @@ -287,3 +339,16 @@ func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.Nod func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) } + +// NodeUnstageVolume mocks base method +func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnstageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnstageVolume indicates an expected call of NodeUnstageVolume +func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go new file mode 100644 index 00000000..9b051eee --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/driver/mock.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + "net" + + "github.com/kubernetes-csi/csi-test/utils" + "google.golang.org/grpc" +) + +type MockCSIDriverServers struct { + Controller *MockControllerServer + Identity *MockIdentityServer + Node *MockNodeServer +} + +type MockCSIDriver struct { + CSIDriver + conn *grpc.ClientConn +} + +func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { + return &MockCSIDriver{ + CSIDriver: CSIDriver{ + servers: &CSIDriverServers{ + Controller: servers.Controller, + Node: servers.Node, + Identity: servers.Identity, + }, + }, + } +} + +func (m *MockCSIDriver) Start() error { + // Listen on a port assigned by the net package + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + + if err := m.CSIDriver.Start(l); err != nil { + l.Close() + return err + } + + return nil +} + +func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { + // Start server + err := m.Start() + if err != nil { + return nil, err + } + + // Create a client connection + m.conn, err = utils.Connect(m.Address()) + if err != nil { + return nil, err + } + + return m.conn, nil +} + +func (m *MockCSIDriver) Close() { + m.conn.Close() + m.server.Stop() +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.lock b/vendor/github.com/kubernetes-csi/csi-test/glide.lock deleted file mode 100644 index 58bd54a4..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/glide.lock +++ /dev/null @@ -1,135 +0,0 @@ -hash: f8f39aef239d83f930c5be2717e5bee5b2169902a3fd4a30a441a4e97ec60a07 -updated: 2017-12-13T08:17:19.928367307-05:00 -imports: -- name: github.com/container-storage-interface/spec - version: 4ac2d13f89360f2da40d188473d77f2ec56b9d0d - subpackages: - - lib/go/csi -- name: github.com/davecgh/go-spew - version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 - subpackages: - - spew -- name: github.com/golang/mock - version: f67f7081ddcd0f92a20c1d58e7cd8b23253d15c7 - subpackages: - - gomock -- name: github.com/golang/protobuf - version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 - subpackages: - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/onsi/ginkgo - version: bc14b6691e7a788e12a21121abdaff1ccdcef9e9 - subpackages: - - config - - internal/codelocation - - internal/containernode - - internal/failer - - internal/leafnodes - - internal/remote - - internal/spec - - internal/spec_iterator - - internal/specrunner - - internal/suite - - internal/testingtproxy - - internal/writer - - reporters - - reporters/stenographer - - reporters/stenographer/support/go-colorable - - reporters/stenographer/support/go-isatty - - types -- name: github.com/onsi/gomega - version: c1fb6682134d162f37c13f42e7157653a7de7d2b - subpackages: - - format - - internal/assertion - - internal/asyncassertion - - internal/oraclematcher - - internal/testingtsupport - - matchers - - matchers/support/goraph/bipartitegraph - - matchers/support/goraph/edge - - matchers/support/goraph/node - - matchers/support/goraph/util - - types -- name: github.com/pmezard/go-difflib - version: 792786c7400a136282c1664665ae0a8db921c6c2 - subpackages: - - difflib -- name: github.com/stretchr/testify - version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f - subpackages: - - assert -- name: golang.org/x/net - version: 5561cd9b4330353950f399814f427425c0a26fd2 - subpackages: - - context - - html - - html/atom - - html/charset - - http2 - - http2/hpack - - 
idna - - internal/timeseries - - lex/httplex - - trace -- name: golang.org/x/sys - version: d5840adf789d732bc8b00f37b26ca956a7cc8e79 - subpackages: - - unix -- name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 - subpackages: - - encoding - - encoding/charmap - - encoding/htmlindex - - encoding/internal - - encoding/internal/identifier - - encoding/japanese - - encoding/korean - - encoding/simplifiedchinese - - encoding/traditionalchinese - - encoding/unicode - - internal/tag - - internal/utf8internal - - language - - runes - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: google.golang.org/genproto - version: f676e0f3ac6395ff1a529ae59a6670878a8371a6 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 1687ce5770e998bcac6a136af6b52f079b9d902b - subpackages: - - balancer - - balancer/roundrobin - - codes - - connectivity - - credentials - - grpclb/grpc_lb_v1/messages - - grpclog - - internal - - keepalive - - metadata - - naming - - peer - - reflection - - reflection/grpc_reflection_v1alpha - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/yaml.v2 - version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 -testImports: [] diff --git a/vendor/github.com/kubernetes-csi/csi-test/glide.yaml b/vendor/github.com/kubernetes-csi/csi-test/glide.yaml deleted file mode 100644 index b04e40ed..00000000 --- a/vendor/github.com/kubernetes-csi/csi-test/glide.yaml +++ /dev/null @@ -1,16 +0,0 @@ -package: github.com/kubernetes-csi/csi-test -import: -- package: github.com/container-storage-interface/spec - subpackages: - - lib/go/csi -- package: google.golang.org/grpc - subpackages: - - reflection -testImport: -- package: github.com/golang/mock - subpackages: - - gomock - - mockgen -- package: golang.org/x/net - subpackages: - - context diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go new file mode 100644 index 00000000..10ea5f35 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_apitest/api_test.go @@ -0,0 +1,18 @@ +package apitest + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" +) + +func TestMyDriver(t *testing.T) { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + sanity.Test(t, config) +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go new file mode 100644 index 00000000..bca267cb --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/_embedded/embedded_test.go @@ -0,0 +1,42 @@ +package embedded + +import ( + "os" + "testing" + + "github.com/kubernetes-csi/csi-test/pkg/sanity" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestMyDriverGinkgo(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CSI Sanity Test Suite") +} + +// The test suite into which the sanity tests get embedded may already +// have before/after suite functions. There can only be one such +// function. Here we define empty ones because then Ginkgo +// will start complaining at runtime when invoking the embedded case +// in hack/e2e.sh if a PR adds back such functions in the sanity test +// code. 
+var _ = BeforeSuite(func() {}) +var _ = AfterSuite(func() {}) + +var _ = Describe("MyCSIDriver", func() { + Context("Config A", func() { + config := &sanity.Config{ + TargetPath: os.TempDir() + "/csi", + StagingPath: os.TempDir() + "/csi", + Address: "/tmp/e2e-csi-sanity.sock", + } + + BeforeEach(func() {}) + + AfterEach(func() {}) + + Describe("CSI Driver Test Suite", func() { + sanity.GinkgoTest(config) + }) + }) +}) diff --git a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh index a3ae9162..baf4c304 100755 --- a/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh +++ b/vendor/github.com/kubernetes-csi/csi-test/hack/e2e.sh @@ -1,32 +1,72 @@ #!/bin/bash -CSI_ENDPOINTS="tcp://127.0.0.1:9998" -CSI_ENDPOINTS="$CSI_ENDPOINTS /tmp/e2e-csi-sanity.sock" -CSI_ENDPOINTS="$CSI_ENDPOINTS unix:///tmp/e2e-csi-sanity.sock" +TESTARGS=$@ +UDS="/tmp/e2e-csi-sanity.sock" +CSI_ENDPOINTS="$CSI_ENDPOINTS ${UDS}" +CSI_MOCK_VERSION="master" -go get -u github.com/thecodeteam/gocsi/mock -cd cmd/csi-sanity - make clean install || exit 1 -cd ../.. +# +# $1 - endpoint for mock. +# $2 - endpoint for csi-sanity in Grpc format. +# See https://github.com/grpc/grpc/blob/master/doc/naming.md +runTest() +{ + CSI_ENDPOINT=$1 ./bin/mock & + local pid=$! -for endpoint in $CSI_ENDPOINTS ; do - if ! echo $endpoint | grep tcp > /dev/null 2>&1 ; then - rm -f $endpoint + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2; ret=$? + kill -9 $pid + + if [ $ret -ne 0 ] ; then + exit $ret fi +} - CSI_ENDPOINT=$endpoint mock & - pid=$! +runTestWithCreds() +{ + CSI_ENDPOINT=$1 CSI_ENABLE_CREDS=true ./bin/mock & + local pid=$! - csi-sanity $@ --ginkgo.skip=MOCKERRORS --csi.endpoint=$endpoint ; ret=$? + ./cmd/csi-sanity/csi-sanity $TESTARGS --csi.endpoint=$2 --csi.secrets=mock/mocksecret.yaml; ret=$? kill -9 $pid - if ! echo $endpoint | grep tcp > /dev/null 2>&1 ; then - rm -f $endpoint + if [ $ret -ne 0 ] ; then + exit $ret fi +} + +runTestAPI() +{ + CSI_ENDPOINT=$1 ./bin/mock & + local pid=$! + + GOCACHE=off go test -v ./hack/_apitest/api_test.go; ret=$? if [ $ret -ne 0 ] ; then exit $ret fi -done + + GOCACHE=off go test -v ./hack/_embedded/embedded_test.go; ret=$? + kill -9 $pid + + if [ $ret -ne 0 ] ; then + exit $ret + fi +} + +make + +cd cmd/csi-sanity + make clean install || exit 1 +cd ../.. + +runTest "${UDS}" "${UDS}" +rm -f $UDS + +runTestWithCreds "${UDS}" "${UDS}" +rm -f $UDS + +runTestAPI "${UDS}" +rm -f $UDS exit 0 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS b/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS new file mode 100644 index 00000000..23eabcd2 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/AUTHORS @@ -0,0 +1,2 @@ +TheCodeTeam +Kubernetes Authors diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/README.md b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md new file mode 100644 index 00000000..8274aa2c --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/README.md @@ -0,0 +1,22 @@ +# Mock CSI Driver +Extremely simple mock driver used to test `csi-sanity` based on `rexray/gocsi/mock`. +It can be used for testing of Container Orchestrators that implement client side +of CSI interface. + +``` +Usage of mock: + -disable-attach + Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability. + -name string + CSI driver name. 
(default "io.kubernetes.storage.mock") +``` + +It prints all received CSI messages to stdout encoded as json, so a test can check that +CO sent the right CSI message. + +Example of such output: + +``` +gRPCCall: {"Method":"/csi.v0.Controller/ControllerGetCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":6}}},{"Type":{"Rpc":{"type":5}}},{"Type":{"Rpc":{"type":2}}}]},"Error":""} +gRPCCall: {"Method":"/csi.v0.Controller/ControllerPublishVolume","Request":{"volume_id":"12","node_id":"some-fake-node-id","volume_capability":{"AccessType":{"Mount":{}},"access_mode":{"mode":1}}},"Response":null,"Error":"rpc error: code = NotFound desc = Not matching Node ID some-fake-node-id to Mock Node ID io.kubernetes.storage.mock"} +``` diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go new file mode 100644 index 00000000..89835e11 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/cache/SnapshotCache.go @@ -0,0 +1,89 @@ +package cache + +import ( + "strings" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +type SnapshotCache interface { + Add(snapshot Snapshot) + + Delete(i int) + + List(ready bool) []csi.Snapshot + + FindSnapshot(k, v string) (int, Snapshot) +} + +type Snapshot struct { + Name string + Parameters map[string]string + SnapshotCSI csi.Snapshot +} + +type snapshotCache struct { + snapshotsRWL sync.RWMutex + snapshots []Snapshot +} + +func NewSnapshotCache() SnapshotCache { + return &snapshotCache{ + snapshots: make([]Snapshot, 0), + } +} + +func (snap *snapshotCache) Add(snapshot Snapshot) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + snap.snapshots = append(snap.snapshots, snapshot) +} + +func (snap *snapshotCache) Delete(i int) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + copy(snap.snapshots[i:], snap.snapshots[i+1:]) + snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] +} + +func (snap *snapshotCache) List(ready bool) []csi.Snapshot { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshots := make([]csi.Snapshot, 0) + for _, v := range snap.snapshots { + if v.SnapshotCSI.GetReadyToUse() { + snapshots = append(snapshots, v.SnapshotCSI) + } + } + + return snapshots +} + +func (snap *snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshotIdx := -1 + for i, vi := range snap.snapshots { + switch k { + case "id": + if strings.EqualFold(v, vi.SnapshotCSI.GetSnapshotId()) { + return i, vi + } + case "sourceVolumeId": + if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { + return i, vi + } + case "name": + if vi.Name == v { + return i, vi + } + } + } + + return snapshotIdx, Snapshot{} +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/main.go b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go new file mode 100644 index 00000000..486d383b --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/main.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "flag" + "fmt" + "net" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/kubernetes-csi/csi-test/driver" + "github.com/kubernetes-csi/csi-test/mock/service" +) + +func main() { + var config service.Config + flag.BoolVar(&config.DisableAttach, "disable-attach", false, "Disables RPC_PUBLISH_UNPUBLISH_VOLUME capability.") + flag.StringVar(&config.DriverName, "name", service.Name, "CSI driver name.") + flag.Int64Var(&config.AttachLimit, "attach-limit", 0, "number of attachable volumes on a node") + flag.Parse() + + endpoint := os.Getenv("CSI_ENDPOINT") + if len(endpoint) == 0 { + fmt.Println("CSI_ENDPOINT must be defined and must be a path") + os.Exit(1) + } + if strings.Contains(endpoint, ":") { + fmt.Println("CSI_ENDPOINT must be a unix path") + os.Exit(1) + } + + // Create mock driver + s := service.New(config) + servers := &driver.CSIDriverServers{ + Controller: s, + Identity: s, + Node: s, + } + d := driver.NewCSIDriver(servers) + + // If creds is enabled, set the default creds. + setCreds := os.Getenv("CSI_ENABLE_CREDS") + if len(setCreds) > 0 && setCreds == "true" { + d.SetDefaultCreds() + } + + // Listen + os.Remove(endpoint) + l, err := net.Listen("unix", endpoint) + if err != nil { + fmt.Printf("Error: Unable to listen on %s socket: %v\n", + endpoint, + err) + os.Exit(1) + } + defer os.Remove(endpoint) + + // Start server + if err := d.Start(l); err != nil { + fmt.Printf("Error: Unable to start mock CSI server: %v\n", + err) + os.Exit(1) + } + fmt.Println("mock driver started") + + // Wait for signal + sigc := make(chan os.Signal, 1) + sigs := []os.Signal{ + syscall.SIGTERM, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGQUIT, + } + signal.Notify(sigc, sigs...) 
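+	// Block until one of the registered signals arrives, then stop the gRPC
+	// server; the deferred os.Remove cleans up the socket file on return.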
+ + <-sigc + d.Stop() + fmt.Println("mock driver stopped") +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml new file mode 100644 index 00000000..e7c9f20d --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/mocksecret.yaml @@ -0,0 +1,16 @@ +CreateVolumeSecret: + secretKey: secretval1 +DeleteVolumeSecret: + secretKey: secretval2 +ControllerPublishVolumeSecret: + secretKey: secretval3 +ControllerUnpublishVolumeSecret: + secretKey: secretval4 +NodeStageVolumeSecret: + secretKey: secretval5 +NodePublishVolumeSecret: + secretKey: secretval6 +CreateSnapshotSecret: + secretKey: secretval7 +DeleteSnapshotSecret: + secretKey: secretval8 diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go new file mode 100644 index 00000000..eace79f8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/controller.go @@ -0,0 +1,577 @@ +package service + +import ( + "fmt" + "math" + "path" + "reflect" + "strconv" + + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +const ( + MaxStorageCapacity = tib + ReadOnlyKey = "readonly" +) + +func (s *service) CreateVolume( + ctx context.Context, + req *csi.CreateVolumeRequest) ( + *csi.CreateVolumeResponse, error) { + + if len(req.Name) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty") + } + if req.VolumeCapabilities == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + // Check to see if the volume already exists. + if i, v := s.findVolByName(ctx, req.Name); i >= 0 { + // Requested volume name already exists, need to check if the existing volume's + // capacity is more or equal to new request's capacity. + if v.GetCapacityBytes() < req.GetCapacityRange().GetRequiredBytes() { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Volume with name %s already exists", req.GetName())) + } + return &csi.CreateVolumeResponse{Volume: &v}, nil + } + + // If no capacity is specified then use 100GiB + capacity := gib100 + if cr := req.CapacityRange; cr != nil { + if rb := cr.RequiredBytes; rb > 0 { + capacity = rb + } + if lb := cr.LimitBytes; lb > 0 { + capacity = lb + } + } + // Check for maximum available capacity + if capacity >= MaxStorageCapacity { + return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, MaxStorageCapacity) + } + // Create the volume and add it to the service's in-mem volume slice. + v := s.newVolume(req.Name, capacity) + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + s.vols = append(s.vols, v) + MockVolumes[v.GetVolumeId()] = Volume{ + VolumeCSI: v, + NodeID: "", + ISStaged: false, + ISPublished: false, + StageTargetPath: "", + TargetPath: "", + } + + return &csi.CreateVolumeResponse{Volume: &v}, nil +} + +func (s *service) DeleteVolume( + ctx context.Context, + req *csi.DeleteVolumeRequest) ( + *csi.DeleteVolumeResponse, error) { + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + // If the volume is not specified, return error + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + // If the volume does not exist then return an idempotent response. 
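+	// The write lock is already held above, so the lock-free lookup is safe here.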
+ i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return &csi.DeleteVolumeResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + copy(s.vols[i:], s.vols[i+1:]) + s.vols[len(s.vols)-1] = csi.Volume{} + s.vols = s.vols[:len(s.vols)-1] + log.WithField("volumeID", req.VolumeId).Debug("mock delete volume") + return &csi.DeleteVolumeResponse{}, nil +} + +func (s *service) ControllerPublishVolume( + ctx context.Context, + req *csi.ControllerPublishVolumeRequest) ( + *csi.ControllerPublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.NodeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Node ID cannot be empty") + } + if req.VolumeCapability == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Not matching Node ID %s to Mock Node ID %s", req.NodeId, s.nodeID) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(req.NodeId, "dev") + + // Check to see if the volume is already published. + if device := v.VolumeContext[devPathKey]; device != "" { + var volRo bool + var roVal string + if ro, ok := v.VolumeContext[ReadOnlyKey]; ok { + roVal = ro + } + + if roVal == "true" { + volRo = true + } else { + volRo = false + } + + // Check if readonly flag is compatible with the publish request. + if req.GetReadonly() != volRo { + return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") + } + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil + } + + var roVal string + if req.GetReadonly() { + roVal = "true" + } else { + roVal = "false" + } + + // Publish the volume. 
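+	// The mock "publishes" by recording a fake device path under a per-node key
+	// in the volume context, along with the readonly flag; no real attach happens.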
+ device := "/dev/mock" + v.VolumeContext[devPathKey] = device + v.VolumeContext[ReadOnlyKey] = roVal + s.vols[i] = v + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil +} + +func (s *service) ControllerUnpublishVolume( + ctx context.Context, + req *csi.ControllerUnpublishVolumeRequest) ( + *csi.ControllerUnpublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + nodeID := req.NodeId + if len(nodeID) == 0 { + // If node id is empty, no failure as per Spec + nodeID = s.nodeID + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Node ID %s does not match to expected Node ID %s", req.NodeId, s.nodeID) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(nodeID, "dev") + + // Check to see if the volume is already unpublished. + if v.VolumeContext[devPathKey] == "" { + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, devPathKey) + delete(v.VolumeContext, ReadOnlyKey) + s.vols[i] = v + + return &csi.ControllerUnpublishVolumeResponse{}, nil +} + +func (s *service) ValidateVolumeCapabilities( + ctx context.Context, + req *csi.ValidateVolumeCapabilitiesRequest) ( + *csi.ValidateVolumeCapabilitiesResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.VolumeCapabilities) == 0 { + return nil, status.Error(codes.InvalidArgument, req.VolumeId) + } + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + return &csi.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: req.GetVolumeContext(), + VolumeCapabilities: req.GetVolumeCapabilities(), + Parameters: req.GetParameters(), + }, + }, nil +} + +func (s *service) ListVolumes( + ctx context.Context, + req *csi.ListVolumesRequest) ( + *csi.ListVolumesResponse, error) { + + // Copy the mock volumes into a new slice in order to avoid + // locking the service's volume slice for the duration of the + // ListVolumes RPC. + var vols []csi.Volume + func() { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + vols = make([]csi.Volume, len(s.vols)) + copy(vols, s.vols) + }() + + var ( + ulenVols = int32(len(vols)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.InvalidArgument, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenVols { + return nil, status.Errorf( + codes.InvalidArgument, + "startingToken=%d > len(vols)=%d", + startingToken, ulenVols) + } + + // Discern the number of remaining entries. 
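+	// The returned window is vols[startingToken : startingToken+maxEntries];
+	// NextToken is set only when entries remain beyond that window.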
+ rem := ulenVols - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListVolumesResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListVolumesResponse_Entry{ + Volume: &vols[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenVols { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListVolumesResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} + +func (s *service) GetCapacity( + ctx context.Context, + req *csi.GetCapacityRequest) ( + *csi.GetCapacityResponse, error) { + + return &csi.GetCapacityResponse{ + AvailableCapacity: MaxStorageCapacity, + }, nil +} + +func (s *service) ControllerGetCapabilities( + ctx context.Context, + req *csi.ControllerGetCapabilitiesRequest) ( + *csi.ControllerGetCapabilitiesResponse, error) { + + caps := []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + } + + if !s.config.DisableAttach { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: caps, + }, nil +} + +func (s *service) CreateSnapshot(ctx context.Context, + req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + // Check arguments + if len(req.GetName()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") + } + if len(req.GetSourceVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") + } + + // Check to see if the snapshot already exists. + if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { + // Requested snapshot name already exists + if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) + } + return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil + } + + // Create the snapshot and add it to the service's in-mem snapshot slice. 
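+	// newSnapshot marks the snapshot ReadyToUse, so it becomes visible to
+	// ListSnapshots as soon as it is added to the cache.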
+ snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) + s.snapshots.Add(snapshot) + + return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil +} + +func (s *service) DeleteSnapshot(ctx context.Context, + req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + + // If the snapshot is not specified, return error + if len(req.SnapshotId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") + } + + // If the snapshot does not exist then return an idempotent response. + i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) + if i < 0 { + return &csi.DeleteSnapshotResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + s.snapshots.Delete(i) + log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + return &csi.DeleteSnapshotResponse{}, nil +} + +func (s *service) ListSnapshots(ctx context.Context, + req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + + // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. + if len(req.GetSnapshotId()) != 0 { + return getSnapshotById(s, req) + } + + // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. + if len(req.GetSourceVolumeId()) != 0 { + return getSnapshotByVolumeId(s, req) + } + + // case 3: no parameter is set, so we return all the snapshots. + return getAllSnapshots(s, req) +} + +func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSnapshotId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + + if len(req.GetSourceVolumeId()) != 0 { + if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { + return &csi.ListSnapshotsResponse{}, nil + } + } + + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSourceVolumeId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Copy the mock snapshots into a new slice in order to avoid + // locking the service's snapshot slice for the duration of the + // ListSnapshots RPC. + readyToUse := true + snapshots := s.snapshots.List(readyToUse) + + var ( + ulenSnapshots = int32(len(snapshots)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenSnapshots { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(snapshots)=%d", + startingToken, ulenSnapshots) + } + + // Discern the number of remaining entries. 
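+	// Apply the same token-based windowing as in ListVolumes above.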
+ rem := ulenSnapshots - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. + if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListSnapshotsResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + entries[i] = &csi.ListSnapshotsResponse_Entry{ + Snapshot: &snapshots[j], + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenSnapshots { + nextToken = fmt.Sprintf("%d", n) + } + + return &csi.ListSnapshotsResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go new file mode 100644 index 00000000..7e8735a9 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/identity.go @@ -0,0 +1,48 @@ +package service + +import ( + "golang.org/x/net/context" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" +) + +func (s *service) GetPluginInfo( + ctx context.Context, + req *csi.GetPluginInfoRequest) ( + *csi.GetPluginInfoResponse, error) { + + return &csi.GetPluginInfoResponse{ + Name: s.config.DriverName, + VendorVersion: VendorVersion, + Manifest: Manifest, + }, nil +} + +func (s *service) Probe( + ctx context.Context, + req *csi.ProbeRequest) ( + *csi.ProbeResponse, error) { + + return &csi.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, nil +} + +func (s *service) GetPluginCapabilities( + ctx context.Context, + req *csi.GetPluginCapabilitiesRequest) ( + *csi.GetPluginCapabilitiesResponse, error) { + + return &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + }, nil +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go new file mode 100644 index 00000000..886a219a --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/node.go @@ -0,0 +1,244 @@ +package service + +import ( + "path" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +func (s *service) NodeStageVolume( + ctx context.Context, + req *csi.NodeStageVolumeRequest) ( + *csi.NodeStageVolumeResponse, error) { + + device, ok := req.PublishContext["device"] + if !ok { + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } + } + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetStagingTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeStgPathKey is the key in the volume's attributes that is set to a + // mock 
stage path if the volume has been published by the node + nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) + + // Check to see if the volume has already been staged. + if v.VolumeContext[nodeStgPathKey] != "" { + // TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS" + // if the capabilities don't match. + return &csi.NodeStageVolumeResponse{}, nil + } + + // Stage the volume. + v.VolumeContext[nodeStgPathKey] = device + s.vols[i] = v + + return &csi.NodeStageVolumeResponse{}, nil +} + +func (s *service) NodeUnstageVolume( + ctx context.Context, + req *csi.NodeUnstageVolumeRequest) ( + *csi.NodeUnstageVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetStagingTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeStgPathKey is the key in the volume's attributes that is set to a + // mock stage path if the volume has been published by the node + nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath) + + // Check to see if the volume has already been unstaged. + if v.VolumeContext[nodeStgPathKey] == "" { + return &csi.NodeUnstageVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, nodeStgPathKey) + s.vols[i] = v + + return &csi.NodeUnstageVolumeResponse{}, nil +} + +func (s *service) NodePublishVolume( + ctx context.Context, + req *csi.NodePublishVolumeRequest) ( + *csi.NodePublishVolumeResponse, error) { + + device, ok := req.PublishContext["device"] + if !ok { + if s.config.DisableAttach { + device = "mock device" + } else { + return nil, status.Error( + codes.InvalidArgument, + "stage volume info 'device' key required") + } + } + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + + if req.GetVolumeCapability() == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been published. + if v.VolumeContext[nodeMntPathKey] != "" { + + // Requests marked Readonly fail due to volumes published by + // the Mock driver supporting only RW mode. + if req.Readonly { + return nil, status.Error(codes.AlreadyExists, req.VolumeId) + } + + return &csi.NodePublishVolumeResponse{}, nil + } + + // Publish the volume. 
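+	// If the volume was staged, record the staging path as the mock mount
+	// source; otherwise fall back to the device from ControllerPublishVolume
+	// (or the placeholder used when attach is disabled).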
+ if req.GetStagingTargetPath() != "" { + v.VolumeContext[nodeMntPathKey] = req.GetStagingTargetPath() + } else { + v.VolumeContext[nodeMntPathKey] = device + } + s.vols[i] = v + + return &csi.NodePublishVolumeResponse{}, nil +} + +func (s *service) NodeUnpublishVolume( + ctx context.Context, + req *csi.NodeUnpublishVolumeRequest) ( + *csi.NodeUnpublishVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been unpublished. + if v.VolumeContext[nodeMntPathKey] == "" { + return &csi.NodeUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. + delete(v.VolumeContext, nodeMntPathKey) + s.vols[i] = v + + return &csi.NodeUnpublishVolumeResponse{}, nil +} + +func (s *service) NodeGetCapabilities( + ctx context.Context, + req *csi.NodeGetCapabilitiesRequest) ( + *csi.NodeGetCapabilitiesResponse, error) { + + return &csi.NodeGetCapabilitiesResponse{ + Capabilities: []*csi.NodeServiceCapability{ + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_UNKNOWN, + }, + }, + }, + { + Type: &csi.NodeServiceCapability_Rpc{ + Rpc: &csi.NodeServiceCapability_RPC{ + Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + }, + }, + }, + }, + }, nil +} + +func (s *service) NodeGetInfo(ctx context.Context, + req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + csiNodeResponse := &csi.NodeGetInfoResponse{ + NodeId: s.nodeID, + } + if s.config.AttachLimit > 0 { + csiNodeResponse.MaxVolumesPerNode = s.config.AttachLimit + } + return csiNodeResponse, nil +} + +func (s *service) NodeGetVolumeStats(ctx context.Context, + req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + return &csi.NodeGetVolumeStatsResponse{}, nil + +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go new file mode 100644 index 00000000..2254ccb8 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/mock/service/service.go @@ -0,0 +1,147 @@ +package service + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-test/mock/cache" + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes" +) + +const ( + // Name is the name of the CSI plug-in. + Name = "io.kubernetes.storage.mock" + + // VendorVersion is the version returned by GetPluginInfo. + VendorVersion = "0.3.0" +) + +// Manifest is the SP's manifest. +var Manifest = map[string]string{ + "url": "https://github.com/kubernetes-csi/csi-test/mock", +} + +type Config struct { + DisableAttach bool + DriverName string + AttachLimit int64 +} + +// Service is the CSI Mock service provider. 
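+// A single implementation backs the CSI Controller, Identity, and Node servers.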
+type Service interface { + csi.ControllerServer + csi.IdentityServer + csi.NodeServer +} + +type service struct { + sync.Mutex + nodeID string + vols []csi.Volume + volsRWL sync.RWMutex + volsNID uint64 + snapshots cache.SnapshotCache + snapshotsNID uint64 + config Config +} + +type Volume struct { + sync.Mutex + VolumeCSI csi.Volume + NodeID string + ISStaged bool + ISPublished bool + StageTargetPath string + TargetPath string +} + +var MockVolumes map[string]Volume + +// New returns a new Service. +func New(config Config) Service { + s := &service{ + nodeID: config.DriverName, + config: config, + } + s.snapshots = cache.NewSnapshotCache() + s.vols = []csi.Volume{ + s.newVolume("Mock Volume 1", gib100), + s.newVolume("Mock Volume 2", gib100), + s.newVolume("Mock Volume 3", gib100), + } + MockVolumes = map[string]Volume{} + + s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"})) + s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"})) + + return s +} + +const ( + kib int64 = 1024 + mib int64 = kib * 1024 + gib int64 = mib * 1024 + gib100 int64 = gib * 100 + tib int64 = gib * 1024 + tib100 int64 = tib * 100 +) + +func (s *service) newVolume(name string, capcity int64) csi.Volume { + return csi.Volume{ + VolumeId: fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)), + VolumeContext: map[string]string{"name": name}, + CapacityBytes: capcity, + } +} + +func (s *service) findVol(k, v string) (volIdx int, volInfo csi.Volume) { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + return s.findVolNoLock(k, v) +} + +func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) { + volIdx = -1 + + for i, vi := range s.vols { + switch k { + case "id": + if strings.EqualFold(v, vi.GetVolumeId()) { + return i, vi + } + case "name": + if n, ok := vi.VolumeContext["name"]; ok && strings.EqualFold(v, n) { + return i, vi + } + } + } + + return +} + +func (s *service) findVolByName( + ctx context.Context, name string) (int, csi.Volume) { + + return s.findVol("name", name) +} + +func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot { + + ptime := ptypes.TimestampNow() + return cache.Snapshot{ + Name: name, + Parameters: parameters, + SnapshotCSI: csi.Snapshot{ + SnapshotId: fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)), + CreationTime: ptime, + SourceVolumeId: sourceVolumeId, + ReadyToUse: true, + }, + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md index f258382c..fd30f192 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/README.md @@ -6,15 +6,6 @@ For CSI drivers written in Golang, the framework provides a simple API function to call to test the driver. Another way to run the test suite is to use the command line program [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity). -## Status -Although the project can be used immediately, it will not provide full -coverage since it is not yet finished. 
Below shows the percentage of -completion for each CSI service: - -* Identity Service: 95% -* Controller Service: 0% -* Node Service: 0% - ## For Golang CSI Drivers This framework leverages the Ginkgo BDD testing framework to deliver a descriptive test suite for your driver. To test your driver, simply call the API in one of your @@ -22,13 +13,50 @@ Golang `TestXXX` functions. For example: ```go func TestMyDriver(t *testing.T) { - // Setup the full driver and its environment - ... setup driver ... + // Setup the full driver and its environment + ... setup driver ... + config := &sanity.Config{ + TargetPath: ... + StagingPath: ... + Address: endpoint, + } + - // Now call the test suite - sanity.Test(t, driverEndpointAddress) + // Now call the test suite + sanity.Test(t, config) } ``` +Only one such test function is supported because under the hood a +Ginkgo test suite gets constructed and executed by the call. + +Alternatively, the tests can also be embedded inside a Ginkgo test +suite. In that case it is possible to define multiple tests with +different configurations: + +```go +var _ = Describe("MyCSIDriver", func () { + Context("Config A", func () { + var config &sanity.Config + + BeforeEach(func() { + //... setup driver and config... + }) + + AfterEach(func() { + //...tear down driver... + }) + + Describe("CSI sanity", func() { + sanity.GinkgoTest(config) + }) + }) + + Context("Config B", func () { + // other configs + }) +}) +``` + ## Command line program Please see [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go new file mode 100644 index 00000000..65a30334 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/cleanup.go @@ -0,0 +1,134 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + "context" + "log" + + "github.com/container-storage-interface/spec/lib/go/csi" + + . "github.com/onsi/ginkgo" +) + +// VolumeInfo keeps track of the information needed to delete a volume. +type VolumeInfo struct { + // Node on which the volume was published, empty if none + // or publishing is not supported. + NodeID string + + // Volume ID assigned by CreateVolume. + VolumeID string +} + +// Cleanup keeps track of resources, in particular volumes, which need +// to be freed when testing is done. +type Cleanup struct { + Context *SanityContext + ControllerClient csi.ControllerClient + NodeClient csi.NodeClient + ControllerPublishSupported bool + NodeStageSupported bool + + // Maps from volume name to the node ID for which the volume + // is published and the volume ID. + volumes map[string]VolumeInfo +} + +// RegisterVolume adds or updates an entry for the volume with the +// given name. 
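+// Volumes registered here are released again by DeleteVolumes.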
+func (cl *Cleanup) RegisterVolume(name string, info VolumeInfo) { + if cl.volumes == nil { + cl.volumes = make(map[string]VolumeInfo) + } + cl.volumes[name] = info +} + +// MaybeRegisterVolume adds or updates an entry for the volume with +// the given name if CreateVolume was successful. +func (cl *Cleanup) MaybeRegisterVolume(name string, vol *csi.CreateVolumeResponse, err error) { + if err == nil && vol.GetVolume().GetVolumeId() != "" { + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + } +} + +// UnregisterVolume removes the entry for the volume with the +// given name, thus preventing all cleanup operations for it. +func (cl *Cleanup) UnregisterVolume(name string) { + if cl.volumes != nil { + delete(cl.volumes, name) + } +} + +// DeleteVolumes stops using the registered volumes and tries to delete all of them. +func (cl *Cleanup) DeleteVolumes() { + if cl.volumes == nil { + return + } + logger := log.New(GinkgoWriter, "cleanup: ", 0) + ctx := context.Background() + + for name, info := range cl.volumes { + logger.Printf("deleting %s = %s", name, info.VolumeID) + if _, err := cl.NodeClient.NodeUnpublishVolume( + ctx, + &csi.NodeUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + TargetPath: cl.Context.Config.TargetPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnpublishVolume: %s", err) + } + + if cl.NodeStageSupported { + if _, err := cl.NodeClient.NodeUnstageVolume( + ctx, + &csi.NodeUnstageVolumeRequest{ + VolumeId: info.VolumeID, + StagingTargetPath: cl.Context.Config.StagingPath, + }, + ); err != nil { + logger.Printf("warning: NodeUnstageVolume: %s", err) + } + } + + if cl.ControllerPublishSupported && info.NodeID != "" { + if _, err := cl.ControllerClient.ControllerUnpublishVolume( + ctx, + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: info.VolumeID, + NodeId: info.NodeID, + Secrets: cl.Context.Secrets.ControllerUnpublishVolumeSecret, + }, + ); err != nil { + logger.Printf("warning: ControllerUnpublishVolume: %s", err) + } + } + + if _, err := cl.ControllerClient.DeleteVolume( + ctx, + &csi.DeleteVolumeRequest{ + VolumeId: info.VolumeID, + Secrets: cl.Context.Secrets.DeleteVolumeSecret, + }, + ); err != nil { + logger.Printf("error: DeleteVolume: %s", err) + } + + cl.UnregisterVolume(name) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go index 33e999b7..022e1e6d 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/controller.go @@ -17,33 +17,56 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - context "golang.org/x/net/context" + + "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -func verifyVolumeInfo(v *csi.VolumeInfo) { +const ( + // DefTestVolumeSize defines the base size of dynamically + // provisioned volumes. 10GB by default, can be overridden by + // setting Config.TestVolumeSize. 
+ DefTestVolumeSize int64 = 10 * 1024 * 1024 * 1024 + + MaxNameLength int = 128 +) + +func TestVolumeSize(sc *SanityContext) int64 { + if sc.Config.TestVolumeSize > 0 { + return sc.Config.TestVolumeSize + } + return DefTestVolumeSize +} + +func verifyVolumeInfo(v *csi.Volume) { Expect(v).NotTo(BeNil()) - Expect(v.GetId()).NotTo(BeEmpty()) + Expect(v.GetVolumeId()).NotTo(BeEmpty()) } -func isCapabilitySupported( +func verifySnapshotInfo(snapshot *csi.Snapshot) { + Expect(snapshot).NotTo(BeNil()) + Expect(snapshot.GetSnapshotId()).NotTo(BeEmpty()) + Expect(snapshot.GetSourceVolumeId()).NotTo(BeEmpty()) + Expect(snapshot.GetCreationTime()).NotTo(BeZero()) +} + +func isControllerCapabilitySupported( c csi.ControllerClient, capType csi.ControllerServiceCapability_RPC_Type, ) bool { caps, err := c.ControllerGetCapabilities( context.Background(), - &csi.ControllerGetCapabilitiesRequest{ - Version: csiClientVersion, - }) + &csi.ControllerGetCapabilitiesRequest{}) Expect(err).NotTo(HaveOccurred()) Expect(caps).NotTo(BeNil()) Expect(caps.GetCapabilities()).NotTo(BeNil()) @@ -57,387 +80,1367 @@ func isCapabilitySupported( return false } -var _ = Describe("ControllerGetCapabilities [Controller Server]", func() { +var _ = DescribeSanity("Controller Service", func(sc *SanityContext) { var ( c csi.ControllerClient + n csi.NodeClient + + cl *Cleanup ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) + n = csi.NewNodeClient(sc.Conn) + + cl = &Cleanup{ + NodeClient: n, + ControllerClient: c, + Context: sc, + } }) - It("should fail when no version is provided", func() { - _, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + AfterEach(func() { + cl.DeleteVolumes() + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + Describe("ControllerGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.ControllerGetCapabilities( + context.Background(), + &csi.ControllerGetCapabilitiesRequest{}) + + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: + case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: + case csi.ControllerServiceCapability_RPC_GET_CAPACITY: + case csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT: + case csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS: + case csi.ControllerServiceCapability_RPC_PUBLISH_READONLY: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } + } + }) }) - It("should return appropriate capabilities", func() { - caps, err := c.ControllerGetCapabilities( - context.Background(), - &csi.ControllerGetCapabilitiesRequest{ - Version: csiClientVersion, - }) + Describe("GetCapacity", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { + Skip("GetCapacity not supported") + } + }) - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) + It("should return capacity (no 
optional values added)", func() { + _, err := c.GetCapacity( + context.Background(), + &csi.GetCapacityRequest{}) + Expect(err).NotTo(HaveOccurred()) - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + // Since capacity is int64 we will not be checking it + // The value of zero is a possible value. + }) + }) - switch cap.GetRpc().GetType() { - case csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME: - case csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: - case csi.ControllerServiceCapability_RPC_LIST_VOLUMES: - case csi.ControllerServiceCapability_RPC_GET_CAPACITY: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + Describe("ListVolumes", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { + Skip("ListVolumes not supported") } - } - }) -}) + }) -var _ = Describe("GetCapacity [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + It("should return appropriate values (no optional values added)", func() { + vols, err := c.ListVolumes( + context.Background(), + &csi.ListVolumesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(vols).NotTo(BeNil()) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + for _, vol := range vols.GetEntries() { + verifyVolumeInfo(vol.GetVolume()) + } + }) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_GET_CAPACITY) { - Skip("GetCapacity not supported") - } + // TODO: Add test to test for tokens + + // TODO: Add test which checks list of volume is there when created, + // and not there when deleted. }) - It("should fail when no version is provided", func() { + Describe("CreateVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("CreateVolume not supported") + } + }) - By("failing when there is no version") - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{}) - Expect(err).To(HaveOccurred()) + It("should fail when no name is provided", func() { + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume("", vol, err) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should return capacity (no optional values added)", func() { - _, err := c.GetCapacity( - context.Background(), - &csi.GetCapacityRequest{ - Version: csiClientVersion, - }) - Expect(err).NotTo(HaveOccurred()) + It("should fail when no volume capabilities are provided", func() { + name := uniqueString("sanity-controller-create-no-volume-capabilities") + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + cl.MaybeRegisterVolume(name, vol, err) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + + By("creating a volume") + name := 
uniqueString("sanity-controller-create-single-no-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should return appropriate values SingleNodeWriter WithCapacity 1Gi Type:Mount", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-single-with-capacity") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: TestVolumeSize(sc), + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + if serverError, ok := status.FromError(err); ok && + (serverError.Code() == codes.OutOfRange || serverError.Code() == codes.Unimplemented) { + Skip("Required bytes not supported") + } + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", TestVolumeSize(sc))) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should not fail when requesting to create a volume with already existing name and same capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice") + size := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, 
VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + Expect(vol1.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + vol2, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol2).NotTo(BeNil()) + Expect(vol2.GetVolume()).NotTo(BeNil()) + Expect(vol2.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + Expect(vol2.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + Expect(vol1.GetVolume().GetVolumeId()).To(Equal(vol2.GetVolume().GetVolumeId())) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + It("should fail when requesting to create a volume with already existing name and different capacity.", func() { + + By("creating a volume") + name := uniqueString("sanity-controller-create-twice-different") + size1 := TestVolumeSize(sc) + + vol1, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).ToNot(HaveOccurred()) + Expect(vol1).NotTo(BeNil()) + Expect(vol1.GetVolume()).NotTo(BeNil()) + Expect(vol1.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol1.GetVolume().GetVolumeId()}) + size2 := 2 * TestVolumeSize(sc) + + _, err = c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size2, + LimitBytes: size2, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol1.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should not fail when creating volume with maximum-length name", func() { - // Since capacity is uint64 we will not be checking it - // The value of zero is a possible value. 
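+			// Build a name of exactly MaxNameLength (128) characters to exercise
+			// the longest volume name a plugin is expected to accept.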
+ nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' + } + name := string(nameBytes) + By("creating a volume") + size := TestVolumeSize(sc) + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + Expect(vol.GetVolume().GetCapacityBytes()).To(BeNumerically(">=", size)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) -}) -var _ = Describe("ListVolumes [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + Describe("DeleteVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { + Skip("DeleteVolume not supported") + } + }) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + It("should fail when no volume id is provided", func() { - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) { - Skip("ListVolumes not supported") - } + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should succeed when an invalid volume id is used", func() { + + _, err := c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: "reallyfakevolumeid", + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a volume") + name := uniqueString("sanity-controller-create-appropriate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // Delete Volume + By("deleting a volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: 
sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - It("should fail when no version is provided", func() { + Describe("ValidateVolumeCapabilities", func() { + It("should fail when no volume id is provided", func() { - By("failing when there is no version") - _, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{}) - Expect(err).To(HaveOccurred()) + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capabilities are provided", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-validate") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ValidateVolumeCapabilities + By("validating volume capabilities") + valivolcap, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(valivolcap).NotTo(BeNil()) + + // If confirmation is provided then it is REQUIRED to provide + // the volume capabilities + if valivolcap.GetConfirmed() != nil { + Expect(valivolcap.GetConfirmed().GetVolumeCapabilities()).NotTo(BeEmpty()) + } + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the requested volume does not exist", func() { + + _, err := c.ValidateVolumeCapabilities( + context.Background(), + &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: "some-vol-id", + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: 
&csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) }) - It("should return appropriate values (no optional values added)", func() { - vols, err := c.ListVolumes( - context.Background(), - &csi.ListVolumesRequest{ - Version: csiClientVersion, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(vols).NotTo(BeNil()) - Expect(vols.GetEntries()).NotTo(BeNil()) + Describe("ControllerPublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerPublishVolume not supported") + } + }) - for _, vol := range vols.GetEntries() { - verifyVolumeInfo(vol.GetVolumeInfo()) - } + It("should fail when no volume id is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no node id is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should fail when no volume capability is provided", func() { + + _, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "id", + NodeId: "fakenode", + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-publish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: 
vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + By("cleaning up unpublishing the volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the volume does not exist", func() { + + By("calling controller publish on a non-existent volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: "some-vol-id", + NodeId: "some-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + }) + + It("should fail when the node does not exist", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-wrong-node") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: "some-fake-node-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: 
sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.NotFound)) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + + It("should fail when the volume is already published but is incompatible", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-published-incompatible") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + pubReq := &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + } + + conpubvol, err := c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).NotTo(HaveOccurred()) + Expect(conpubvol).NotTo(BeNil()) + + // Publish again with different attributes. 
+ pubReq.Readonly = true + + conpubvol, err = c.ControllerPublishVolume(context.Background(), pubReq) + Expect(err).To(HaveOccurred()) + Expect(conpubvol).To(BeNil()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) + + By("cleaning up unpublishing the volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") + + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) }) - // TODO: Add test to test for tokens + Describe("ControllerUnpublishVolume", func() { + BeforeEach(func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { + Skip("ControllerUnpublishVolume not supported") + } + }) + + It("should fail when no volume id is provided", func() { + + _, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) + + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) + + It("should return appropriate values (no optional values added)", func() { + + // Create Volume First + By("creating a single node writer volume") + name := uniqueString("sanity-controller-unpublish") + + vol, err := c.CreateVolume( + context.Background(), + &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + Secrets: sc.Secrets.CreateVolumeSecret, + Parameters: sc.Config.TestVolumeParameters, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(vol).NotTo(BeNil()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := n.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) + + // ControllerPublishVolume + By("calling controllerpublish on that volume") + + conpubvol, err := c.ControllerPublishVolume( + context.Background(), + &csi.ControllerPublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: 
nid.GetNodeId()}) + Expect(conpubvol).NotTo(BeNil()) + + // ControllerUnpublishVolume + By("calling controllerunpublish on that volume") + + conunpubvol, err := c.ControllerUnpublishVolume( + context.Background(), + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + // NodeID is optional in ControllerUnpublishVolume + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(conunpubvol).NotTo(BeNil()) + + By("cleaning up deleting the volume") - // TODO: Add test which checks list of volume is there when created, - // and not there when deleted. + _, err = c.DeleteVolume( + context.Background(), + &csi.DeleteVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + cl.UnregisterVolume(name) + }) + }) }) -var _ = Describe("CreateVolume [Controller Server]", func() { +var _ = DescribeSanity("ListSnapshots [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("CreateVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS) { + Skip("ListSnapshots not supported") } }) - It("should fail when no version is provided", func() { - - _, err := c.CreateVolume( + It("should return appropriate values (no optional values added)", func() { + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{}) - Expect(err).To(HaveOccurred()) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + for _, snapshot := range snapshots.GetEntries() { + verifySnapshotInfo(snapshot.GetSnapshot()) + } }) - It("should fail when no name is provided", func() { + It("should return snapshots that match the specify snapshot id", func() { - _, err := c.CreateVolume( + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + &csi.ListSnapshotsRequest{SnapshotId: snapshot.GetSnapshot().GetSnapshotId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(BeNumerically("==", 1)) + verifySnapshotInfo(snapshots.GetEntries()[0].GetSnapshot()) + Expect(snapshots.GetEntries()[0].GetSnapshot().GetSnapshotId()).To(Equal(snapshot.GetSnapshot().GetSnapshotId())) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) }) - It("should fail when no volume capabilities are provided", func() { + It("should return empty when the specify snapshot id is not exist", func() { - _, err := c.CreateVolume( + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: "name", - }) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + &csi.ListSnapshotsRequest{SnapshotId: "none-exist-id"}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) - It("should return appropriate values SingleNodeWriter NoCapacity Type:Mount", func() { + It("should return snapshots that match the specify source volume id)", func() { By("creating a volume") - name := "sanity" - vol, err := c.CreateVolume( + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-2") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) + Expect(err).NotTo(HaveOccurred()) + + snapshots, err := c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + &csi.ListSnapshotsRequest{SourceVolumeId: snapshot.GetSnapshot().GetSourceVolumeId()}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + for _, snap := range snapshots.GetEntries() { + verifySnapshotInfo(snap.GetSnapshot()) + Expect(snap.GetSnapshot().GetSourceVolumeId()).To(Equal(snapshot.GetSnapshot().GetSourceVolumeId())) + } + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("cleaning up deleting the volume") - _, err = c.DeleteVolume( + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return empty when the specify source volume id is not exist", func() { + + snapshots, err := c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + &csi.ListSnapshotsRequest{SourceVolumeId: "none-exist-volume-id"}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(snapshots.GetEntries()).To(BeEmpty()) }) - // Pending fix in mock file - It("[MOCKERRORS] should return appropriate values SingleNodeWriter WithCapacity 1Gi 
Type:Mount", func() { + It("check the presence of new snapshots in the snapshot list", func() { + // List Snapshots before creating new snapshots. + snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + + totalSnapshots := len(snapshots.GetEntries()) By("creating a volume") - name := "sanity" - size := uint64(1 * 1024 * 1024 * 1024) - vol, err := c.CreateVolume( + volReq := MakeCreateVolumeReq(sc, "listSnapshots-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + + By("creating a snapshot") + snapReq := MakeCreateSnapshotReq(sc, "listSnapshots-snapshot-3", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + + snapshots, err = c.ListSnapshots( context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - CapacityRange: &csi.CapacityRange{ - RequiredBytes: size, - }, - }) + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots + 1)) + + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - Expect(vol.GetVolumeInfo().GetCapacityBytes()).To(Equal(size)) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + + // List snapshots and check if the deleted snapshot exists in the snapshot list. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + &csi.ListSnapshotsRequest{}) Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) + Expect(len(snapshots.GetEntries())).To(Equal(totalSnapshots)) }) -}) -var _ = Describe("DeleteVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - ) + It("should return next token when a limited number of entries are requested", func() { + // minSnapshotCount is the minimum number of snapshots expected to exist, + // based on which paginated snapshot listing is performed. + minSnapshotCount := 5 + // maxEntried is the maximum entries in list snapshot request. + maxEntries := 2 + // currentTotalVols is the total number of volumes at a given time. It + // is used to verify that all the snapshots have been listed. + currentTotalSnapshots := 0 + + // Get the number of existing volumes. 
+ snapshots, err := c.ListSnapshots( + context.Background(), + &csi.ListSnapshotsRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - BeforeEach(func() { - c = csi.NewControllerClient(conn) + initialTotalSnapshots := len(snapshots.GetEntries()) + currentTotalSnapshots = initialTotalSnapshots - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) { - Skip("DeleteVolume not supported") - } - }) + createVols := make([]*csi.Volume, 0) + createSnapshots := make([]*csi.Snapshot, 0) - It("should fail when no version is provided", func() { + // Ensure minimum minVolCount volumes exist. + if initialTotalSnapshots < minSnapshotCount { - _, err := c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{}) - Expect(err).To(HaveOccurred()) + By("creating required new volumes") + requiredSnapshots := minSnapshotCount - initialTotalSnapshots - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + for i := 1; i <= requiredSnapshots; i++ { + volReq := MakeCreateVolumeReq(sc, "volume"+strconv.Itoa(i)) + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + Expect(volume).NotTo(BeNil()) + createVols = append(createVols, volume.GetVolume()) - It("should fail when no volume id is provided", func() { + snapReq := MakeCreateSnapshotReq(sc, "snapshot"+strconv.Itoa(i), volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapReq) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) + createSnapshots = append(createSnapshots, snapshot.GetSnapshot()) + } + + // Update the current total snapshots count. + currentTotalSnapshots += requiredSnapshots + } - _, err := c.DeleteVolume( + // Request list snapshots with max entries maxEntries. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, + &csi.ListSnapshotsRequest{ + MaxEntries: int32(maxEntries), }) - Expect(err).To(HaveOccurred()) + Expect(err).NotTo(HaveOccurred()) + Expect(snapshots).NotTo(BeNil()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + nextToken := snapshots.GetNextToken() - It("should succeed when an invalid volume id is used", func() { + Expect(len(snapshots.GetEntries())).To(Equal(maxEntries)) - _, err := c.DeleteVolume( + // Request list snapshots with starting_token and no max entries. + snapshots, err = c.ListSnapshots( context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: "reallyfakevolumeid", + &csi.ListSnapshotsRequest{ + StartingToken: nextToken, }) Expect(err).NotTo(HaveOccurred()) - }) + Expect(snapshots).NotTo(BeNil()) - It("should return appropriate values (no optional values added)", func() { + // Ensure that all the remaining entries are returned at once. 
+ Expect(len(snapshots.GetEntries())).To(Equal(currentTotalSnapshots - maxEntries)) - // Create Volume First - By("creating a volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + if initialTotalSnapshots < minSnapshotCount { - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + By("cleaning up deleting the snapshots") - // Delete Volume - By("deleting a volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) - Expect(err).NotTo(HaveOccurred()) + for _, snap := range createSnapshots { + delSnapReq := MakeDeleteSnapshotReq(sc, snap.GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) + } + + By("cleaning up deleting the volumes") + + for _, vol := range createVols { + delVolReq := MakeDeleteVolumeReq(sc, vol.GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) + } + } }) + }) -var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { +var _ = DescribeSanity("DeleteSnapshot [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - }) + c = csi.NewControllerClient(sc.Conn) - It("should fail when no version is provided", func() { + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("DeleteSnapshot not supported") + } + }) - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) + It("should fail when no snapshot id is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + req := &csi.DeleteSnapshotRequest{} - It("should fail when no volume id is provided", func() { + if sc.Secrets != nil { + req.Secrets = sc.Secrets.DeleteSnapshotSecret + } - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - Version: csiClientVersion, - }) + _, err := c.DeleteSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) serverError, ok := status.FromError(err) @@ -445,351 +1448,252 @@ var _ = Describe("ValidateVolumeCapabilities [Controller Server]", func() { Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no volume capabilities are provided", func() { - - _, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should succeed when an invalid snapshot id is used", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + req := MakeDeleteSnapshotReq(sc, "reallyfakesnapshotid") + _, err := c.DeleteSnapshot(context.Background(), req) + 
Expect(err).NotTo(HaveOccurred()) }) It("should return appropriate values (no optional values added)", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "DeleteSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) + Expect(err).NotTo(HaveOccurred()) + // Create Snapshot First + By("creating a snapshot") + snapshotReq := MakeCreateSnapshotReq(sc, "DeleteSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snapshot, err := c.CreateSnapshot(context.Background(), snapshotReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + Expect(snapshot).NotTo(BeNil()) + verifySnapshotInfo(snapshot.GetSnapshot()) - // ValidateVolumeCapabilities - By("validating volume capabilities") - valivolcap, err := c.ValidateVolumeCapabilities( - context.Background(), - &csi.ValidateVolumeCapabilitiesRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snapshot.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(valivolcap).NotTo(BeNil()) - Expect(valivolcap.GetSupported()).To(BeTrue()) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) }) -var _ = Describe("ControllerPublishVolume [Controller Server]", func() { +var _ = DescribeSanity("CreateSnapshot [Controller Server]", func(sc *SanityContext) { var ( c csi.ControllerClient - n csi.NodeClient ) BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + c = csi.NewControllerClient(sc.Conn) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerPublishVolume not supported") + if !isControllerCapabilitySupported(c, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT) { + Skip("CreateSnapshot not supported") } }) - It("should fail when no version is provided", func() { - - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) + It("should fail when no name is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + req := 
&csi.CreateSnapshotRequest{ + SourceVolumeId: "testId", + } - It("should fail when no volume id is provided", func() { + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - }) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should fail when no node id is provided", func() { - - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + It("should fail when no source volume id is provided", func() { - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + req := &csi.CreateSnapshotRequest{ + Name: "name", + } - It("should fail when no volume capability is provided", func() { + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } - _, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - NodeId: "fakenode", - }) + _, err := c.CreateSnapshot(context.Background(), req) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) }) - It("should return appropriate values (no optional values added)", func() { + It("should not fail when requesting to create a snapshot with already existing name and same SourceVolumeId.", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-1") + volume, err := c.CreateVolume(context.Background(), volReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("getting a node id") - nid, err := n.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-1", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - // ControllerPublishVolume - By("calling controllerpublish on that volume") - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - 
AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - }) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - By("cleaning up unpublishing the volume") - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) -}) -var _ = Describe("ControllerUnpublishVolume [Controller Server]", func() { - var ( - c csi.ControllerClient - n csi.NodeClient - ) + It("should fail when requesting to create a snapshot with already existing name and different SourceVolumeId.", func() { - BeforeEach(func() { - c = csi.NewControllerClient(conn) - n = csi.NewNodeClient(conn) + By("creating a volume") + volume, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-2")) + Expect(err).ToNot(HaveOccurred()) - if !isCapabilitySupported(c, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) { - Skip("ControllerUnpublishVolume not supported") - } - }) + By("creating a snapshot with the created volume source id") + req1 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), req1) + Expect(err).NotTo(HaveOccurred()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - It("should fail when no version is provided", func() { + volume2, err := c.CreateVolume(context.Background(), MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3")) + Expect(err).ToNot(HaveOccurred()) - _, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{}) + By("creating a snapshot with the same name but different volume source id") + req2 := MakeCreateSnapshotReq(sc, "CreateSnapshot-snapshot-2", volume2.GetVolume().GetVolumeId(), nil) + _, err = c.CreateSnapshot(context.Background(), req2) Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should fail when no volume id is provided", func() { + Expect(serverError.Code()).To(Equal(codes.AlreadyExists)) - _, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) + Expect(err).NotTo(HaveOccurred()) - serverError, ok := status.FromError(err) - 
Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + By("cleaning up deleting the volume") + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, err = c.DeleteVolume(context.Background(), delVolReq) + Expect(err).NotTo(HaveOccurred()) }) - It("should return appropriate values (no optional values added)", func() { + It("should not fail when creating snapshot with maximum-length name", func() { - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := c.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - }) + By("creating a volume") + volReq := MakeCreateVolumeReq(sc, "CreateSnapshot-volume-3") + volume, err := c.CreateVolume(context.Background(), volReq) Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) - By("getting a node id") - nid, err := n.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) + nameBytes := make([]byte, MaxNameLength) + for i := 0; i < MaxNameLength; i++ { + nameBytes[i] = 'a' + } + name := string(nameBytes) + + By("creating a snapshot") + snapReq1 := MakeCreateSnapshotReq(sc, name, volume.GetVolume().GetVolumeId(), nil) + snap1, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + Expect(snap1).NotTo(BeNil()) + verifySnapshotInfo(snap1.GetSnapshot()) - // ControllerPublishVolume - By("calling controllerpublish on that volume") - conpubvol, err := c.ControllerPublishVolume( - context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: nid.GetNodeId(), - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - Readonly: false, - }) + snap2, err := c.CreateSnapshot(context.Background(), snapReq1) Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) + Expect(snap2).NotTo(BeNil()) + verifySnapshotInfo(snap2.GetSnapshot()) - // ControllerUnpublishVolume - By("calling controllerunpublish on that volume") - conunpubvol, err := c.ControllerUnpublishVolume( - context.Background(), - &csi.ControllerUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - // NodeID is optional in ControllerUnpublishVolume - NodeId: nid.GetNodeId(), - }) + By("cleaning up deleting the snapshot") + delSnapReq := MakeDeleteSnapshotReq(sc, snap1.GetSnapshot().GetSnapshotId()) + _, err = c.DeleteSnapshot(context.Background(), delSnapReq) Expect(err).NotTo(HaveOccurred()) - Expect(conunpubvol).NotTo(BeNil()) By("cleaning up deleting the volume") - _, err = c.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + delVolReq := MakeDeleteVolumeReq(sc, volume.GetVolume().GetVolumeId()) + _, 
err = c.DeleteVolume(context.Background(), delVolReq) Expect(err).NotTo(HaveOccurred()) }) }) + +func MakeCreateVolumeReq(sc *SanityContext, name string) *csi.CreateVolumeRequest { + size1 := TestVolumeSize(sc) + + req := &csi.CreateVolumeRequest{ + Name: name, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: size1, + LimitBytes: size1, + }, + Parameters: sc.Config.TestVolumeParameters, + } + + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateVolumeSecret + } + + return req +} + +func MakeCreateSnapshotReq(sc *SanityContext, name, sourceVolumeId string, parameters map[string]string) *csi.CreateSnapshotRequest { + req := &csi.CreateSnapshotRequest{ + Name: name, + SourceVolumeId: sourceVolumeId, + Parameters: parameters, + } + + if sc.Secrets != nil { + req.Secrets = sc.Secrets.CreateSnapshotSecret + } + + return req +} + +func MakeDeleteSnapshotReq(sc *SanityContext, id string) *csi.DeleteSnapshotRequest { + delSnapReq := &csi.DeleteSnapshotRequest{ + SnapshotId: id, + } + + if sc.Secrets != nil { + delSnapReq.Secrets = sc.Secrets.DeleteSnapshotSecret + } + + return delSnapReq +} + +func MakeDeleteVolumeReq(sc *SanityContext, id string) *csi.DeleteVolumeRequest { + delVolReq := &csi.DeleteVolumeRequest{ + VolumeId: id, + } + + if sc.Secrets != nil { + delVolReq.Secrets = sc.Secrets.DeleteVolumeSecret + } + + return delVolReq +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go index facdf39d..c1a5eb7e 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/identity.go @@ -17,86 +17,83 @@ limitations under the License. package sanity import ( + "context" + "fmt" "regexp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" ) -var ( - csiClientVersion = &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - } -) - -var _ = Describe("GetSupportedVersions [Identity Server]", func() { +var _ = DescribeSanity("Identity Service", func(sc *SanityContext) { var ( c csi.IdentityClient ) BeforeEach(func() { - c = csi.NewIdentityClient(conn) + c = csi.NewIdentityClient(sc.Conn) }) - It("should return an array of supported versions", func() { - res, err := c.GetSupportedVersions( - context.Background(), - &csi.GetSupportedVersionsRequest{}) - - By("checking response to have supported versions list") - Expect(err).NotTo(HaveOccurred()) - Expect(res.GetSupportedVersions()).NotTo(BeNil()) - Expect(len(res.GetSupportedVersions()) >= 1).To(BeTrue()) - - By("checking each version") - for _, version := range res.GetSupportedVersions() { - Expect(version).NotTo(BeNil()) - Expect(version.GetMajor()).To(BeNumerically("<", 100)) - Expect(version.GetMinor()).To(BeNumerically("<", 100)) - Expect(version.GetPatch()).To(BeNumerically("<", 100)) - } - }) -}) - -var _ = Describe("GetPluginInfo [Identity Server]", func() { - var ( - c csi.IdentityClient - ) + Describe("GetPluginCapabilities", func() { + It("should return appropriate capabilities", func() { + req := &csi.GetPluginCapabilitiesRequest{} + res, err := c.GetPluginCapabilities(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("checking successful response") + Expect(res.GetCapabilities()).NotTo(BeNil()) + for _, cap := range res.GetCapabilities() { + switch cap.GetService().GetType() { + case csi.PluginCapability_Service_CONTROLLER_SERVICE: + case csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetService().GetType())) + } + } + + }) - BeforeEach(func() { - c = csi.NewIdentityClient(conn) }) - It("should fail when no version is provided", func() { - _, err := c.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + Describe("Probe", func() { + It("should return appropriate information", func() { + req := &csi.ProbeRequest{} + res, err := c.Probe(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying return status") + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code() == codes.FailedPrecondition || + serverError.Code() == codes.OK).To(BeTrue()) + + if res.GetReady() != nil { + Expect(res.GetReady().GetValue() == true || + res.GetReady().GetValue() == false).To(BeTrue()) + } + }) }) - It("should return appropriate information", func() { - req := &csi.GetPluginInfoRequest{ - Version: csiClientVersion, - } - res, err := c.GetPluginInfo(context.Background(), req) - Expect(err).NotTo(HaveOccurred()) - Expect(res).NotTo(BeNil()) - - By("verifying name size and characters") - Expect(res.GetName()).ToNot(HaveLen(0)) - Expect(len(res.GetName())).To(BeNumerically("<=", 63)) - Expect(regexp. - MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). 
- MatchString(res.GetName())).To(BeTrue()) + Describe("GetPluginInfo", func() { + It("should return appropriate information", func() { + req := &csi.GetPluginInfoRequest{} + res, err := c.GetPluginInfo(context.Background(), req) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + By("verifying name size and characters") + Expect(res.GetName()).ToNot(HaveLen(0)) + Expect(len(res.GetName())).To(BeNumerically("<=", 63)) + Expect(regexp. + MustCompile("^[a-zA-Z][A-Za-z0-9-\\.\\_]{0,61}[a-zA-Z]$"). + MatchString(res.GetName())).To(BeTrue()) + }) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go index 2d4734df..9bd9194b 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/node.go @@ -17,215 +17,237 @@ limitations under the License. package sanity import ( + "context" "fmt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/container-storage-interface/spec/lib/go/csi" - context "golang.org/x/net/context" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var ( - csiTargetPath = "/mnt/csi" -) - -var _ = Describe("NodeGetCapabilities [Node Server]", func() { - var ( - c csi.NodeClient - ) - - BeforeEach(func() { - c = csi.NewNodeClient(conn) - }) - - It("should fail when no version is provided", func() { - _, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate capabilities", func() { - caps, err := c.NodeGetCapabilities( - context.Background(), - &csi.NodeGetCapabilitiesRequest{ - Version: csiClientVersion, - }) +func isNodeCapabilitySupported(c csi.NodeClient, + capType csi.NodeServiceCapability_RPC_Type, +) bool { - By("checking successful response") - Expect(err).NotTo(HaveOccurred()) - Expect(caps).NotTo(BeNil()) - Expect(caps.GetCapabilities()).NotTo(BeNil()) - - for _, cap := range caps.GetCapabilities() { - Expect(cap.GetRpc()).NotTo(BeNil()) + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) - switch cap.GetRpc().GetType() { - case csi.NodeServiceCapability_RPC_UNKNOWN: - default: - Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) - } + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + if cap.GetRpc().GetType() == capType { + return true } - }) -}) + } + return false +} + +func isPluginCapabilitySupported(c csi.IdentityClient, + capType csi.PluginCapability_Service_Type, +) bool { + + caps, err := c.GetPluginCapabilities( + context.Background(), + &csi.GetPluginCapabilitiesRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + Expect(caps.GetCapabilities()).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetService()).NotTo(BeNil()) + if cap.GetService().GetType() == capType { + return true + } + } + return false +} -var _ = Describe("NodeProbe [Node Server]", func() { +var _ = DescribeSanity("Node Service", func(sc *SanityContext) { var ( - c csi.NodeClient + cl *Cleanup + c csi.NodeClient + s csi.ControllerClient + + controllerPublishSupported bool + nodeStageSupported bool ) BeforeEach(func() { - c = 
csi.NewNodeClient(conn) - }) - - It("should fail when no version is provided", func() { - _, err := c.NodeProbe( - context.Background(), - &csi.NodeProbeRequest{}) - Expect(err).To(HaveOccurred()) + c = csi.NewNodeClient(sc.Conn) + s = csi.NewControllerClient(sc.Conn) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + controllerPublishSupported = isControllerCapabilitySupported( + s, + csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + nodeStageSupported = isNodeCapabilitySupported(c, csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME) + if nodeStageSupported { + err := createMountTargetLocation(sc.Config.StagingPath) + Expect(err).NotTo(HaveOccurred()) + } + cl = &Cleanup{ + Context: sc, + NodeClient: c, + ControllerClient: s, + ControllerPublishSupported: controllerPublishSupported, + NodeStageSupported: nodeStageSupported, + } }) - It("should return appropriate values", func() { - pro, err := c.NodeProbe( - context.Background(), - &csi.NodeProbeRequest{ - Version: csiClientVersion, - }) - - Expect(err).NotTo(HaveOccurred()) - Expect(pro).NotTo(BeNil()) + AfterEach(func() { + cl.DeleteVolumes() }) -}) -var _ = Describe("GetNodeID [Node Server]", func() { - var ( - c csi.NodeClient - ) + Describe("NodeGetCapabilities", func() { + It("should return appropriate capabilities", func() { + caps, err := c.NodeGetCapabilities( + context.Background(), + &csi.NodeGetCapabilitiesRequest{}) - BeforeEach(func() { - c = csi.NewNodeClient(conn) + By("checking successful response") + Expect(err).NotTo(HaveOccurred()) + Expect(caps).NotTo(BeNil()) + + for _, cap := range caps.GetCapabilities() { + Expect(cap.GetRpc()).NotTo(BeNil()) + + switch cap.GetRpc().GetType() { + case csi.NodeServiceCapability_RPC_UNKNOWN: + case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + case csi.NodeServiceCapability_RPC_GET_VOLUME_STATS: + default: + Fail(fmt.Sprintf("Unknown capability: %v\n", cap.GetRpc().GetType())) + } + } + }) }) - It("should fail when no version is provided", func() { - _, err := c.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{}) - Expect(err).To(HaveOccurred()) + Describe("NodeGetInfo", func() { + var ( + i csi.IdentityClient + accessibilityConstraintSupported bool + ) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) - - It("should return appropriate values", func() { - nid, err := c.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) + BeforeEach(func() { + i = csi.NewIdentityClient(sc.Conn) + accessibilityConstraintSupported = isPluginCapabilitySupported(i, csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS) + }) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) - }) -}) + It("should return approproate values", func() { + ninfo, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) -var _ = Describe("NodePublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - ) + Expect(err).NotTo(HaveOccurred()) + Expect(ninfo).NotTo(BeNil()) + Expect(ninfo.GetNodeId()).NotTo(BeEmpty()) + Expect(ninfo.GetMaxVolumesPerNode()).NotTo(BeNumerically("<", 0)) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isCapabilitySupported( - s, - 
csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + if accessibilityConstraintSupported { + Expect(ninfo.GetAccessibleTopology()).NotTo(BeNil()) + } + }) }) - It("should fail when no version is provided", func() { + Describe("NodePublishVolume", func() { + It("should fail when no volume id is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + It("should fail when no target path is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - It("should fail when no volume id is provided", func() { + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + It("should fail when no volume capability is provided", func() { + _, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: "id", + TargetPath: sc.Config.TargetPath, + Secrets: sc.Secrets.NodePublishVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no target path is provided", func() { + Describe("NodeUnpublishVolume", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{}) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when no volume capability is provided", func() { + It("should fail when no target path is provided", func() { - _, err := c.NodePublishVolume( - context.Background(), - &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - TargetPath: csiTargetPath, - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnpublishVolume( + context.Background(), + &csi.NodeUnpublishVolumeRequest{ + VolumeId: "id", + }) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + 
Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should return appropriate values (no optional values added)", func() { + Describe("NodeStageVolume", func() { + var ( + device string + ) - // Create Volume First - By("creating a single node writer volume") - name := "sanity" - vol, err := s.CreateVolume( - context.Background(), - &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, - VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeStageVolume not supported") + } + + device = "/dev/mock" + }) + + It("should fail when no volume id is provided", func() { + _, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -233,32 +255,24 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + ) + Expect(err).To(HaveOccurred()) - By("getting a node id") - nid, err := c.GetNodeID( - context.Background(), - &csi.GetNodeIDRequest{ - Version: csiClientVersion, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nid).NotTo(BeNil()) - Expect(nid.GetNodeId()).NotTo(BeEmpty()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - var conpubvol *csi.ControllerPublishVolumeResponse - if controllerPublishSupported { - By("controller publishing volume") - conpubvol, err = s.ControllerPublishVolume( + It("should fail when no staging target path is provided", func() { + _, err := c.NodeStageVolume( context.Background(), - &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: nid.GetNodeId(), + &csi.NodeStageVolumeRequest{ + VolumeId: "id", VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -267,138 +281,86 @@ var _ = Describe("NodePublishVolume [Node Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(conpubvol).NotTo(BeNil()) - } - - // NodePublishVolume - By("publishing the volume on a node") - nodepubvolRequest := &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, }, - }, - } - if controllerPublishSupported { - nodepubvolRequest.PublishVolumeInfo = conpubvol.GetPublishVolumeInfo() - } - nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) - Expect(err).NotTo(HaveOccurred()) - Expect(nodepubvol).NotTo(BeNil()) + ) + Expect(err).To(HaveOccurred()) - // NodeUnpublishVolume - 
By("cleaning up calling nodeunpublish") - nodeunpubvol, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - if controllerPublishSupported { - By("cleaning up calling controllerunpublishing the volume") - nodeunpubvol, err := c.NodeUnpublishVolume( + It("should fail when no volume capability is provided", func() { + _, err := c.NodeStageVolume( context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) - } - - By("cleaning up deleting the volume") - _, err = s.DeleteVolume( - context.Background(), - &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) - Expect(err).NotTo(HaveOccurred()) - }) -}) - -var _ = Describe("NodeUnpublishVolume [Node Server]", func() { - var ( - s csi.ControllerClient - c csi.NodeClient - controllerPublishSupported bool - ) + &csi.NodeStageVolumeRequest{ + VolumeId: "id", + StagingTargetPath: sc.Config.StagingPath, + PublishContext: map[string]string{ + "device": device, + }, + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).To(HaveOccurred()) - BeforeEach(func() { - s = csi.NewControllerClient(conn) - c = csi.NewNodeClient(conn) - controllerPublishSupported = isCapabilitySupported( - s, - csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should fail when no version is provided", func() { - - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{}) - Expect(err).To(HaveOccurred()) - - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + Describe("NodeUnstageVolume", func() { + BeforeEach(func() { + if !nodeStageSupported { + Skip("NodeUnstageVolume not supported") + } + }) - It("should fail when no volume id is provided", func() { + It("should fail when no volume id is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + StagingTargetPath: sc.Config.StagingPath, + }) + Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) - }) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) - It("should fail when no target path is provided", func() { + It("should fail when no staging target path is provided", func() { - _, err := c.NodeUnpublishVolume( - context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: "id", - }) - Expect(err).To(HaveOccurred()) + _, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: "id", + }) + 
Expect(err).To(HaveOccurred()) - serverError, ok := status.FromError(err) - Expect(ok).To(BeTrue()) - Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + serverError, ok := status.FromError(err) + Expect(ok).To(BeTrue()) + Expect(serverError.Code()).To(Equal(codes.InvalidArgument)) + }) }) - It("should return appropriate values (no optional values added)", func() { + It("should work", func() { + name := uniqueString("sanity-node-full") // Create Volume First By("creating a single node writer volume") - name := "sanity" vol, err := s.CreateVolume( context.Background(), &csi.CreateVolumeRequest{ - Version: csiClientVersion, - Name: name, + Name: name, VolumeCapabilities: []*csi.VolumeCapability{ - &csi.VolumeCapability{ + { AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, }, @@ -407,22 +369,32 @@ var _ = Describe("NodeUnpublishVolume [Node Server]", func() { }, }, }, - }) + Secrets: sc.Secrets.CreateVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) Expect(vol).NotTo(BeNil()) - Expect(vol.GetVolumeInfo()).NotTo(BeNil()) - Expect(vol.GetVolumeInfo().GetId()).NotTo(BeEmpty()) + Expect(vol.GetVolume()).NotTo(BeNil()) + Expect(vol.GetVolume().GetVolumeId()).NotTo(BeEmpty()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId()}) + + By("getting a node id") + nid, err := c.NodeGetInfo( + context.Background(), + &csi.NodeGetInfoRequest{}) + Expect(err).NotTo(HaveOccurred()) + Expect(nid).NotTo(BeNil()) + Expect(nid.GetNodeId()).NotTo(BeEmpty()) - // ControllerPublishVolume var conpubvol *csi.ControllerPublishVolumeResponse if controllerPublishSupported { - By("calling controllerpublish on the volume") + By("controller publishing volume") + conpubvol, err = s.ControllerPublishVolume( context.Background(), &csi.ControllerPublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - NodeId: "foobar", + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), VolumeCapability: &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Mount{ Mount: &csi.VolumeCapability_MountVolume{}, @@ -431,65 +403,115 @@ var _ = Describe("NodeUnpublishVolume [Node Server]", func() { Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, - Readonly: false, - }) + VolumeContext: vol.GetVolume().GetVolumeContext(), + Readonly: false, + Secrets: sc.Secrets.ControllerPublishVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) + cl.RegisterVolume(name, VolumeInfo{VolumeID: vol.GetVolume().GetVolumeId(), NodeID: nid.GetNodeId()}) Expect(conpubvol).NotTo(BeNil()) } - + // NodeStageVolume + if nodeStageSupported { + By("node staging volume") + nodestagevol, err := c.NodeStageVolume( + context.Background(), + &csi.NodeStageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + StagingTargetPath: sc.Config.StagingPath, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodeStageVolumeSecret, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodestagevol).NotTo(BeNil()) + } // NodePublishVolume By("publishing the volume on a node") - nodepubvolRequest := &csi.NodePublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: 
csiTargetPath, - VolumeCapability: &csi.VolumeCapability{ - AccessType: &csi.VolumeCapability_Mount{ - Mount: &csi.VolumeCapability_MountVolume{}, - }, - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + var stagingPath string + if nodeStageSupported { + stagingPath = sc.Config.StagingPath + } + nodepubvol, err := c.NodePublishVolume( + context.Background(), + &csi.NodePublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, + StagingTargetPath: stagingPath, + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, + VolumeContext: vol.GetVolume().GetVolumeContext(), + PublishContext: conpubvol.GetPublishContext(), + Secrets: sc.Secrets.NodePublishVolumeSecret, }, - } - if controllerPublishSupported { - nodepubvolRequest.PublishVolumeInfo = conpubvol.GetPublishVolumeInfo() - } - nodepubvol, err := c.NodePublishVolume(context.Background(), nodepubvolRequest) + ) Expect(err).NotTo(HaveOccurred()) Expect(nodepubvol).NotTo(BeNil()) // NodeUnpublishVolume + By("cleaning up calling nodeunpublish") nodeunpubvol, err := c.NodeUnpublishVolume( context.Background(), &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, + VolumeId: vol.GetVolume().GetVolumeId(), + TargetPath: sc.Config.TargetPath, }) Expect(err).NotTo(HaveOccurred()) Expect(nodeunpubvol).NotTo(BeNil()) + if nodeStageSupported { + By("cleaning up calling nodeunstage") + nodeunstagevol, err := c.NodeUnstageVolume( + context.Background(), + &csi.NodeUnstageVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + StagingTargetPath: sc.Config.StagingPath, + }, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(nodeunstagevol).NotTo(BeNil()) + } + if controllerPublishSupported { - By("cleaning up unpublishing the volume") - nodeunpubvol, err := c.NodeUnpublishVolume( + By("cleaning up calling controllerunpublishing") + + controllerunpubvol, err := s.ControllerUnpublishVolume( context.Background(), - &csi.NodeUnpublishVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - TargetPath: csiTargetPath, - }) + &csi.ControllerUnpublishVolumeRequest{ + VolumeId: vol.GetVolume().GetVolumeId(), + NodeId: nid.GetNodeId(), + Secrets: sc.Secrets.ControllerUnpublishVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) - Expect(nodeunpubvol).NotTo(BeNil()) + Expect(controllerunpubvol).NotTo(BeNil()) } By("cleaning up deleting the volume") + _, err = s.DeleteVolume( context.Background(), &csi.DeleteVolumeRequest{ - Version: csiClientVersion, - VolumeId: vol.GetVolumeInfo().GetId(), - }) + VolumeId: vol.GetVolume().GetVolumeId(), + Secrets: sc.Secrets.DeleteVolumeSecret, + }, + ) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go index ecf88b19..e3c1684e 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/sanity.go @@ -17,10 +17,14 @@ limitations under the License. 
package sanity import ( - "sync" + "crypto/rand" + "fmt" + "io/ioutil" + "os" "testing" "github.com/kubernetes-csi/csi-test/utils" + yaml "gopkg.in/yaml.v2" "google.golang.org/grpc" @@ -28,28 +32,163 @@ import ( . "github.com/onsi/gomega" ) -var ( - driverAddress string - conn *grpc.ClientConn - lock sync.Mutex -) +// CSISecrets consists of secrets used in CSI credentials. +type CSISecrets struct { + CreateVolumeSecret map[string]string `yaml:"CreateVolumeSecret"` + DeleteVolumeSecret map[string]string `yaml:"DeleteVolumeSecret"` + ControllerPublishVolumeSecret map[string]string `yaml:"ControllerPublishVolumeSecret"` + ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` + NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` + NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` + CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` + DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` +} + +// Config provides the configuration for the sanity tests. It +// needs to be initialized by the user of the sanity package. +type Config struct { + TargetPath string + StagingPath string + Address string + SecretsFile string + + TestVolumeSize int64 + TestVolumeParametersFile string + TestVolumeParameters map[string]string +} + +// SanityContext holds the variables that each test can depend on. It +// gets initialized before each test block runs. +type SanityContext struct { + Config *Config + Conn *grpc.ClientConn + Secrets *CSISecrets + + connAddress string +} -// Test will test the CSI driver at the specified address -func Test(t *testing.T, address string) { - lock.Lock() - defer lock.Unlock() +// Test will test the CSI driver at the specified address by +// setting up a Ginkgo suite and running it. +func Test(t *testing.T, reqConfig *Config) { + path := reqConfig.TestVolumeParametersFile + if len(path) != 0 { + yamlFile, err := ioutil.ReadFile(path) + if err != nil { + panic(fmt.Sprintf("failed to read file %q: %v", path, err)) + } + err = yaml.Unmarshal(yamlFile, &reqConfig.TestVolumeParameters) + if err != nil { + panic(fmt.Sprintf("error unmarshaling yaml: %v", err)) + } + } - driverAddress = address + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) RegisterFailHandler(Fail) RunSpecs(t, "CSI Driver Test Suite") } -var _ = BeforeSuite(func() { +func GinkgoTest(reqConfig *Config) { + sc := &SanityContext{ + Config: reqConfig, + } + + registerTestsInGinkgo(sc) +} + +func (sc *SanityContext) setup() { var err error - conn, err = utils.Connect(driverAddress) + + if len(sc.Config.SecretsFile) > 0 { + sc.Secrets, err = loadSecrets(sc.Config.SecretsFile) + Expect(err).NotTo(HaveOccurred()) + } else { + sc.Secrets = &CSISecrets{} + } + + // It is possible that a test sets sc.Config.Address + // dynamically (and differently!) in a BeforeEach, so only + // reuse the connection if the address is still the same. 
+ if sc.Conn == nil || sc.connAddress != sc.Config.Address { + By("connecting to CSI driver") + sc.Conn, err = utils.Connect(sc.Config.Address) + Expect(err).NotTo(HaveOccurred()) + sc.connAddress = sc.Config.Address + } else { + By(fmt.Sprintf("reusing connection to CSI driver at %s", sc.connAddress)) + } + + By("creating mount and staging directories") + err = createMountTargetLocation(sc.Config.TargetPath) Expect(err).NotTo(HaveOccurred()) -}) + if len(sc.Config.StagingPath) > 0 { + err = createMountTargetLocation(sc.Config.StagingPath) + Expect(err).NotTo(HaveOccurred()) + } +} + +func (sc *SanityContext) teardown() { + // We intentionally do not close the connection to the CSI + // driver here because the large amount of connection attempts + // caused test failures + // (https://github.com/kubernetes-csi/csi-test/issues/101). We + // could fix this with retries + // (https://github.com/kubernetes-csi/csi-test/pull/97) but + // that requires more discussion, so instead we just connect + // once per process instead of once per test case. This was + // also said to be faster + // (https://github.com/kubernetes-csi/csi-test/pull/98). +} + +func createMountTargetLocation(targetPath string) error { + fileInfo, err := os.Stat(targetPath) + if err != nil && os.IsNotExist(err) { + return os.MkdirAll(targetPath, 0755) + } else if err != nil { + return err + } + if !fileInfo.IsDir() { + return fmt.Errorf("Target location %s is not a directory", targetPath) + } + + return nil +} + +func loadSecrets(path string) (*CSISecrets, error) { + var creds CSISecrets + + yamlFile, err := ioutil.ReadFile(path) + if err != nil { + return &creds, fmt.Errorf("failed to read file %q: #%v", path, err) + } -var _ = AfterSuite(func() { - conn.Close() -}) + err = yaml.Unmarshal(yamlFile, &creds) + if err != nil { + return &creds, fmt.Errorf("error unmarshaling yaml: #%v", err) + } + + return &creds, nil +} + +var uniqueSuffix = "-" + pseudoUUID() + +// pseudoUUID returns a unique string generated from random +// bytes, empty string in case of error. +func pseudoUUID() string { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + // Shouldn't happen?! + return "" + } + return fmt.Sprintf("%08X-%08X", b[0:4], b[4:8]) +} + +// uniqueString returns a unique string by appending a random +// number. In case of an error, just the prefix is returned, so it +// alone should already be fairly unique. +func uniqueString(prefix string) string { + return prefix + uniqueSuffix +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go new file mode 100644 index 00000000..47763b75 --- /dev/null +++ b/vendor/github.com/kubernetes-csi/csi-test/pkg/sanity/tests.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sanity + +import ( + . 
"github.com/onsi/ginkgo" +) + +type test struct { + text string + body func(*SanityContext) +} + +var tests []test + +// DescribeSanity must be used instead of the usual Ginkgo Describe to +// register a test block. The difference is that the body function +// will be called multiple times with the right context (when +// setting up a Ginkgo suite or a testing.T test, with the right +// configuration). +func DescribeSanity(text string, body func(*SanityContext)) bool { + tests = append(tests, test{text, body}) + return true +} + +// registerTestsInGinkgo invokes the actual Gingko Describe +// for the tests registered earlier with DescribeSanity. +func registerTestsInGinkgo(sc *SanityContext) { + for _, test := range tests { + Describe(test.text, func() { + BeforeEach(func() { + sc.setup() + }) + + test.body(sc) + + AfterEach(func() { + sc.teardown() + }) + }) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go index 49ad8283..03b0f052 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/co_test.go @@ -16,13 +16,16 @@ limitations under the License. package test import ( + "context" + "fmt" + "reflect" "testing" "github.com/container-storage-interface/spec/lib/go/csi" - gomock "github.com/golang/mock/gomock" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" mock_driver "github.com/kubernetes-csi/csi-test/driver" mock_utils "github.com/kubernetes-csi/csi-test/utils" - "golang.org/x/net/context" ) func TestPluginInfoResponse(t *testing.T) { @@ -33,13 +36,7 @@ func TestPluginInfoResponse(t *testing.T) { driver := mock_driver.NewMockIdentityServer(m) // Setup input - in := &csi.GetPluginInfoRequest{ - Version: &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - } + in := &csi.GetPluginInfoRequest{} // Setup mock outout out := &csi.GetPluginInfoResponse{ @@ -64,6 +61,24 @@ func TestPluginInfoResponse(t *testing.T) { } } +type pbMatcher struct { + x proto.Message +} + +func (p pbMatcher) Matches(x interface{}) bool { + y := x.(proto.Message) + return proto.Equal(p.x, y) +} + +func (p pbMatcher) String() string { + return fmt.Sprintf("pb equal to %v", p.x) +} + +func pbMatch(x interface{}) gomock.Matcher { + v := x.(proto.Message) + return &pbMatcher{v} +} + func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup mock @@ -72,13 +87,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { driver := mock_driver.NewMockIdentityServer(m) // Setup input - in := &csi.GetPluginInfoRequest{ - Version: &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - } + in := &csi.GetPluginInfoRequest{} // Setup mock outout out := &csi.GetPluginInfoResponse{ @@ -91,7 +100,7 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { // Setup expectation // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value - driver.EXPECT().GetPluginInfo(gomock.Any(), in).Return(out, nil).Times(1) + driver.EXPECT().GetPluginInfo(gomock.Any(), pbMatch(in)).Return(out, nil).Times(1) // Create a new RPC server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ @@ -115,3 +124,65 @@ func TestGRPCGetPluginInfoReponse(t *testing.T) { t.Errorf("Unknown name: %s\n", name) } } + +func TestGRPCAttach(t *testing.T) { + + // Setup mock + m := gomock.NewController(&mock_utils.SafeGoroutineTester{}) + defer m.Finish() + driver := mock_driver.NewMockControllerServer(m) + + // Setup input + defaultVolumeID := "myname" + 
defaultNodeID := "MyNodeID" + defaultCaps := &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + } + publishVolumeInfo := map[string]string{ + "first": "foo", + "second": "bar", + "third": "baz", + } + defaultRequest := &csi.ControllerPublishVolumeRequest{ + VolumeId: defaultVolumeID, + NodeId: defaultNodeID, + VolumeCapability: defaultCaps, + Readonly: false, + } + + // Setup mock outout + out := &csi.ControllerPublishVolumeResponse{ + PublishContext: publishVolumeInfo, + } + + // Setup expectation + // !IMPORTANT!: Must set context expected value to gomock.Any() to match any value + driver.EXPECT().ControllerPublishVolume(gomock.Any(), pbMatch(defaultRequest)).Return(out, nil).Times(1) + + // Create a new RPC + server := mock_driver.NewMockCSIDriver(&mock_driver.MockCSIDriverServers{ + Controller: driver, + }) + conn, err := server.Nexus() + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + defer server.Close() + + // Make call + c := csi.NewControllerClient(conn) + r, err := c.ControllerPublishVolume(context.Background(), defaultRequest) + if err != nil { + t.Errorf("Error: %s", err.Error()) + } + + info := r.GetPublishContext() + if !reflect.DeepEqual(info, publishVolumeInfo) { + t.Errorf("Invalid publish info: %v", info) + } +} diff --git a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go index a0cf555a..ae8c3367 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go +++ b/vendor/github.com/kubernetes-csi/csi-test/test/driver_test.go @@ -36,17 +36,13 @@ type simpleDriver struct { wg sync.WaitGroup } -func (s *simpleDriver) GetSupportedVersions( - context.Context, *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) { - return &csi.GetSupportedVersionsResponse{ - SupportedVersions: []*csi.Version{ - &csi.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - }, - }, nil +func (s *simpleDriver) GetPluginCapabilities(context.Context, *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + // TODO: Return some simple Plugin Capabilities + return &csi.GetPluginCapabilitiesResponse{}, nil +} + +func (s *simpleDriver) Probe(context.Context, *csi.ProbeRequest) (*csi.ProbeResponse, error) { + return &csi.ProbeResponse{}, nil } func (s *simpleDriver) GetPluginInfo( diff --git a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go index c89a5cf1..3baf9672 100644 --- a/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go +++ b/vendor/github.com/kubernetes-csi/csi-test/utils/safegoroutinetester.go @@ -29,7 +29,7 @@ type SafeGoroutineTester struct{} // Errorf prints the error to the screen then panics func (s *SafeGoroutineTester) Errorf(format string, args ...interface{}) { - fmt.Printf(format, args) + fmt.Printf(format, args...) panic("MOCK TEST ERROR") }
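
The reworked pkg/sanity API in this bump no longer takes a bare address string; callers now hand the suite a *sanity.Config. A minimal sketch of how a driver's own Go test would drive the suite after this change (the socket path, mount directories, and test name below are illustrative placeholders, not values taken from this patch):

package sanity_test

import (
	"testing"

	"github.com/kubernetes-csi/csi-test/pkg/sanity"
)

// TestMyDriver runs the CSI sanity suite against a driver that is
// already serving gRPC on the given endpoint. All paths are placeholders.
func TestMyDriver(t *testing.T) {
	config := &sanity.Config{
		Address:     "unix:///tmp/csi.sock", // endpoint the driver listens on
		TargetPath:  "/tmp/csi-mount",       // passed as the NodePublishVolume target path
		StagingPath: "/tmp/csi-staging",     // exercised when STAGE_UNSTAGE_VOLUME is reported
		// SecretsFile: "secrets.yaml",      // optional; unmarshalled into CSISecrets
	}
	sanity.Test(t, config)
}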
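The new tests.go registers spec blocks lazily so the same specs can run both through Test() and through GinkgoTest(). A hedged sketch of how an additional block would hook into that mechanism, mirroring the DescribeSanity pattern used by node.go above (the block name and the RPC exercised here are illustrative, not part of this patch):

package sanity

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// The assignment runs at package init and only records the block;
// registerTestsInGinkgo later wraps it in a Describe whose BeforeEach
// calls sc.setup(), so sc.Conn is ready when the body executes.
var _ = DescribeSanity("Probe [Identity Service]", func(sc *SanityContext) {
	var c csi.IdentityClient

	BeforeEach(func() {
		c = csi.NewIdentityClient(sc.Conn)
	})

	It("should answer Probe", func() {
		_, err := c.Probe(context.Background(), &csi.ProbeRequest{})
		Expect(err).NotTo(HaveOccurred())
	})
})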