From 83a4ef15dfda6403862437f8acf3606f255e888a Mon Sep 17 00:00:00 2001 From: Peeyush Gupta Date: Tue, 19 Nov 2019 08:58:58 -0500 Subject: [PATCH 01/30] Adding build for ppc64le --- build.make | 1 + 1 file changed, 1 insertion(+) diff --git a/build.make b/build.make index 1b6f35fe1..7075a37ee 100644 --- a/build.make +++ b/build.make @@ -70,6 +70,7 @@ build-%: check-go-version-go CGO_ENABLED=0 GOOS=linux go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* if [ "$$ARCH" = "amd64" ]; then \ CGO_ENABLED=0 GOOS=windows go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*.exe ./cmd/$* ; \ + CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*-ppc64le ./cmd/$* fi container-%: build-% From 003c14b2d4ae3b1463db5e5b3ff91f39b03f5ba8 Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Mon, 11 Nov 2019 23:49:42 -0800 Subject: [PATCH 02/30] Add snapshotter CRDs after cluster setup Signed-off-by: Grant Griffiths --- prow.sh | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/prow.sh b/prow.sh index 3b9621a21..bc9c9f03b 100755 --- a/prow.sh +++ b/prow.sh @@ -322,6 +322,9 @@ configvar CSI_PROW_E2E_ALPHA_GATES_1_16 'VolumeSnapshotDataSource=true' "alpha f configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes" configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" +# Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment +configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0-rc4' "external-snapshotter version tag" + # Some tests are known to be unusable in a KinD cluster. For example, # stopping kubelet with "ssh systemctl stop kubelet" simply # doesn't work. Such tests should be written in a way that they verify @@ -657,6 +660,59 @@ install_hostpath () { fi } +# Installs all nessesary snapshotter CRDs +install_snapshot_crds() { + # Wait until volumesnapshot CRDs are in place. + CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/config/crd" + kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotclasses.yaml" --validate=false + kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshots.yaml" --validate=false + kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotcontents.yaml" --validate=false + cnt=0 + until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io \ + && kubectl get volumesnapshots.snapshot.storage.k8s.io \ + && kubectl get volumesnapshotcontents.snapshot.storage.k8s.io; do + if [ $cnt -gt 30 ]; then + echo >&2 "ERROR: snapshot CRDs not ready after over 1 min" + exit 1 + fi + echo "$(date +%H:%M:%S)" "waiting for snapshot CRDs, attempt #$cnt" + cnt=$((cnt + 1)) + sleep 2 + done +} + +# Install snapshot controller and associated RBAC, retrying until the pod is running. 
+install_snapshot_controller() { + kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml" + cnt=0 + until kubectl get clusterrolebinding snapshot-controller-role; do + if [ $cnt -gt 30 ]; then + echo "Cluster role bindings:" + kubectl describe clusterrolebinding + echo >&2 "ERROR: snapshot controller RBAC not ready after over 5 min" + exit 1 + fi + echo "$(date +%H:%M:%S)" "waiting for snapshot RBAC setup complete, attempt #$cnt" + cnt=$((cnt + 1)) + sleep 10 + done + + + kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml" + cnt=0 + until kubectl get statefulset snapshot-controller | grep snapshot-controller | grep "1/1"; do + if [ $cnt -gt 30 ]; then + echo "Running statefulsets:" + kubectl describe statefulsets + echo >&2 "ERROR: snapshot controller not ready after over 5 min" + exit 1 + fi + echo "$(date +%H:%M:%S)" "waiting for snapshot controller deployment to complete, attempt #$cnt" + cnt=$((cnt + 1)) + sleep 10 + done +} + # collect logs and cluster status (like the version of all components, Kubernetes version, test version) collect_cluster_info () { cat < Date: Sat, 30 Nov 2019 00:29:00 +0530 Subject: [PATCH 03/30] Use kind v0.6.0 kind v0.6.0 appends the kubeconfig with the default config at ~/.kube/config. --- prow.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/prow.sh b/prow.sh index d719e9808..b88230629 100755 --- a/prow.sh +++ b/prow.sh @@ -107,8 +107,7 @@ configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version # kind version to use. If the pre-installed version is different, # the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/ # (if available), otherwise it is built from source. -# TODO: https://github.com/kubernetes-csi/csi-release-tools/issues/39 -configvar CSI_PROW_KIND_VERSION "86bc23d84ac12dcb56a0528890736e2c347c2dc3" "kind" +configvar CSI_PROW_KIND_VERSION "v0.6.0" "kind" # ginkgo test runner version to use. If the pre-installed version is # different, the desired version is built from source. @@ -579,8 +578,7 @@ EOF die "Cluster creation failed again, giving up. See the 'kind-cluster' artifact directory for additional logs." fi fi - KUBECONFIG="$(kind get kubeconfig-path --name=csi-prow)" - export KUBECONFIG + export KUBECONFIG="${HOME}/.kube/config" } # Deletes kind cluster inside a prow job From 9a7a685ee169d669a0182532315aeecd8a8715dc Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Mon, 2 Dec 2019 17:45:57 -0800 Subject: [PATCH 04/30] Create a kind cluster with two worker nodes so that the topology feature can be tested. Test cases that test accessing volumes from multiple nodes need to be skipped --- prow.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/prow.sh b/prow.sh index b88230629..815bc9d8a 100755 --- a/prow.sh +++ b/prow.sh @@ -330,7 +330,11 @@ configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0-rc4' "external-snapshotter version tag # whether they can run with the current cluster provider, but until # they are, we filter them out by name. Like the other test selection # variables, this is again a space separated list of regular expressions. 
-configvar CSI_PROW_E2E_SKIP 'Disruptive' "tests that need to be skipped" +# +# "different node" test skips can be removed once +# https://github.com/kubernetes/kubernetes/pull/82678 has been backported +# to all the K8s versions we test against +configvar CSI_PROW_E2E_SKIP 'Disruptive|different\s+node' "tests that need to be skipped" # This is the directory for additional result files. Usually set by Prow, but # if not (for example, when invoking manually) it defaults to the work directory. @@ -526,6 +530,7 @@ apiVersion: kind.sigs.k8s.io/v1alpha3 nodes: - role: control-plane - role: worker +- role: worker EOF # kubeadm has API dependencies between apiVersion and Kubernetes version @@ -840,10 +845,6 @@ run_e2e () ( install_e2e || die "building e2e.test failed" install_ginkgo || die "installing ginkgo failed" - # TODO (?): multi-node cluster (depends on https://github.com/kubernetes-csi/csi-driver-host-path/pull/14). - # When running on a multi-node cluster, we need to figure out where the - # hostpath driver was deployed and set ClientNodeName accordingly. - generate_test_driver >"${CSI_PROW_WORK}/test-driver.yaml" || die "generating test-driver.yaml failed" # Rename, merge and filter JUnit files. Necessary in case that we run the E2E suite again From 53888ae7d59b862bfded46b883570c98f5fc5fff Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Tue, 3 Dec 2019 18:18:38 -0800 Subject: [PATCH 05/30] Improve README by adding an explicit Kubernetes dependency section --- README.md | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index cc40f161c..08c828846 100644 --- a/README.md +++ b/README.md @@ -141,17 +141,6 @@ The `vendor` directory is optional. It is still present in projects because it avoids downloading sources during CI builds. If this is no longer deemed necessary, then a project can also remove the directory. -When using packages that are part of the Kubernetes source code, the -commands above are not enough because the [lack of semantic -versioning](https://github.com/kubernetes/kubernetes/issues/72638) -prevents `go mod` from finding newer releases. Importing directly from -`kubernetes/kubernetes` also needs `replace` statements to override -the fake `v0.0.0` versions -(https://github.com/kubernetes/kubernetes/issues/79384). The -`go-get-kubernetes.sh` script can be used to update all packages in -lockstep to a different Kubernetes version. It takes a single version -number like "1.16.0". - Conversion of a repository that uses `dep` to `go mod` can be done with: GO111MODULE=on go mod init @@ -160,3 +149,18 @@ Conversion of a repository that uses `dep` to `go mod` can be done with: GO111MODULE=on go mod vendor git rm -f Gopkg.toml Gopkg.lock git add go.mod go.sum vendor + +### Updating Kubernetes dependencies + +When using packages that are part of the Kubernetes source code, the +commands above are not enough because the [lack of semantic +versioning](https://github.com/kubernetes/kubernetes/issues/72638) +prevents `go mod` from finding newer releases. Importing directly from +`kubernetes/kubernetes` also needs `replace` statements to override +the fake `v0.0.0` versions +(https://github.com/kubernetes/kubernetes/issues/79384). The +`go-get-kubernetes.sh` script can be used to update all packages in +lockstep to a different Kubernetes version. 
Example usage: +``` +$ ./release-tools/go-get-kubernetes.sh 1.16.4 +``` From 4ad69492c97834b52d74d0b67b6f47b0590e4be1 Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Tue, 3 Dec 2019 23:48:29 -0800 Subject: [PATCH 06/30] Improve snapshot pod running checks and improve version_gt Signed-off-by: Grant Griffiths --- prow.sh | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/prow.sh b/prow.sh index d719e9808..27c705129 100755 --- a/prow.sh +++ b/prow.sh @@ -713,10 +713,11 @@ install_snapshot_controller() { kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml" cnt=0 - until kubectl get statefulset snapshot-controller | grep snapshot-controller | grep "1/1"; do + expected_running_pods=$(curl https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${CSI_SNAPSHOTTER_VERSION}"/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | grep replicas | cut -d ':' -f 2-) + while [ "$(kubectl get pods -l app=snapshot-controller | grep 'Running' -c)" -lt "$expected_running_pods" ]; do if [ $cnt -gt 30 ]; then - echo "Running statefulsets:" - kubectl describe statefulsets + echo "snapshot-controller pod status:" + kubectl describe pods -l app=snapshot-controller echo >&2 "ERROR: snapshot controller not ready after over 5 min" exit 1 fi @@ -996,8 +997,30 @@ make_test_to_junit () { fi } +# version_gt returns true if arg1 is greater than arg2. +# +# This function expects versions to be one of the following formats: +# X.Y.Z, release-X.Y.Z, vX.Y.Z +# +# where X,Y, and Z are any number. +# +# Partial versions (1.2, release-1.2) work as well. +# The follow substrings are stripped before version comparison: +# - "v" +# - "release-" +# +# Usage: +# version_gt release-1.3 v1.2.0 (returns true) +# version_gt v1.1.1 v1.2.0 (returns false) +# version_gt 1.1.1 v1.2.0 (returns false) +# version_gt 1.3.1 v1.2.0 (returns true) +# version_gt 1.1.1 release-1.2.0 (returns false) +# version_gt 1.2.0 1.2.2 (returns false) function version_gt() { - test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; + versions=$(for ver in "$@"; do ver=${ver#release-}; echo "${ver#v}"; done) + greaterVersion=${1#"release-"}; + greaterVersion=${greaterVersion#"v"}; + test "$(printf '%s' "$versions" | sort -V | head -n 1)" != "$greaterVersion" } main () { From a4e629966848d0097461f49f8aab0dfeb68d7a68 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Wed, 4 Dec 2019 14:12:50 -0800 Subject: [PATCH 07/30] fix syntax for ppc64le build --- build.make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.make b/build.make index 7075a37ee..a9b9d25dc 100644 --- a/build.make +++ b/build.make @@ -70,7 +70,7 @@ build-%: check-go-version-go CGO_ENABLED=0 GOOS=linux go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* if [ "$$ARCH" = "amd64" ]; then \ CGO_ENABLED=0 GOOS=windows go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*.exe ./cmd/$* ; \ - CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*-ppc64le ./cmd/$* + CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build $(GOFLAGS_VENDOR) -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$*-ppc64le ./cmd/$* ; \ fi container-%: build-% From b98b2aed087f4e768db673a9d43dff4c7abd1482 
Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Mon, 16 Dec 2019 19:13:38 -0800 Subject: [PATCH 08/30] Enable snapshot tests in 1.17 to be run in non-alpha jobs. This requires adding one more parallel e2e test run with a special focus flag because snapshot tests are still guarded with a "[Feature:VolumeSnapshotDataSource]" tag. The setting that skips all tests with "[Feature:.*]" has to be removed because it overrides the focus. We don't have serial snapshot tests yet. This needs to be modified again if we add any in the future. --- prow.sh | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/prow.sh b/prow.sh index 9bfdc2e28..c160f886c 100755 --- a/prow.sh +++ b/prow.sh @@ -132,7 +132,7 @@ configvar CSI_PROW_BUILD_JOB true "building code in repo enabled" # use the same settings as for "latest" Kubernetes. This works # as long as there are no breaking changes in Kubernetes, like # deprecating or changing the implementation of an alpha feature. -configvar CSI_PROW_KUBERNETES_VERSION 1.15.3 "Kubernetes" +configvar CSI_PROW_KUBERNETES_VERSION 1.17.0 "Kubernetes" # This is a hack to workaround the issue that each version # of kind currently only supports specific patch versions of @@ -142,7 +142,6 @@ configvar CSI_PROW_KUBERNETES_VERSION 1.15.3 "Kubernetes" # # If the version is prefixed with "release-", then nothing # is overridden. -override_k8s_version "1.14.6" override_k8s_version "1.15.3" # CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and @@ -206,9 +205,9 @@ configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" # # CSI_PROW_E2E_REPO=none disables E2E testing. # TOOO: remove versioned variables and make e2e version match k8s version -configvar CSI_PROW_E2E_VERSION_1_14 v1.14.0 "E2E version for Kubernetes 1.14.x" configvar CSI_PROW_E2E_VERSION_1_15 v1.15.0 "E2E version for Kubernetes 1.15.x" configvar CSI_PROW_E2E_VERSION_1_16 v1.16.0 "E2E version for Kubernetes 1.16.x" +configvar CSI_PROW_E2E_VERSION_1_17 v1.17.0 "E2E version for Kubernetes 1.17.x" # TODO: add new CSI_PROW_E2E_VERSION entry for future Kubernetes releases configvar CSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version configvar CSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions @@ -278,6 +277,14 @@ tests_need_alpha_cluster () { tests_enabled "parallel-alpha" "serial-alpha" } +# Regex for non-alpha, feature-tagged tests that should be run. +# +# Starting with 1.17, snapshots is beta, but the E2E tests still have the +# [Feature:] tag. They need to be explicitly enabled. +configvar CSI_PROW_E2E_FOCUS_1_15 '^' "non-alpha, feature-tagged tests for Kubernetes = 1.15" # no tests to run, match nothing +configvar CSI_PROW_E2E_FOCUS_1_16 '^' "non-alpha, feature-tagged tests for Kubernetes = 1.16" # no tests to run, match nothing +configvar CSI_PROW_E2E_FOCUS_LATEST '\[Feature:VolumeSnapshotDataSource\]' "non-alpha, feature-tagged tests for Kubernetes >= 1.17" +configvar CSI_PROW_E2E_FOCUS "$(get_versioned_variable CSI_PROW_E2E_FOCUS "${csi_prow_kubernetes_version_suffix}")" "non-alpha, feature-tagged tests" # Serial vs. parallel is always determined by these regular expressions. 
# Individual regular expressions are seperated by spaces for readability @@ -313,12 +320,11 @@ configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi # kubernetes-csi components must be updated, either by disabling # the failing test for "latest" or by updating the test and not running # it anymore for older releases. -configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.14" configvar CSI_PROW_E2E_ALPHA_GATES_1_15 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.15" configvar CSI_PROW_E2E_ALPHA_GATES_1_16 'VolumeSnapshotDataSource=true' "alpha feature gates for Kubernetes 1.16" # TODO: add new CSI_PROW_ALPHA_GATES_xxx entry for future Kubernetes releases and # add new gates to CSI_PROW_E2E_ALPHA_GATES_LATEST. -configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes" +configvar CSI_PROW_E2E_ALPHA_GATES_LATEST '' "alpha feature gates for latest Kubernetes" configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" # Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment @@ -1111,6 +1117,16 @@ main () { warn "E2E parallel failed" ret=1 fi + + # Run tests that are feature tagged, but non-alpha + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_FOCUS}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}")"; then + warn "E2E parallel features failed" + ret=1 + fi fi if tests_enabled "serial"; then From fc80975954a9720a1611fa9a5195cbfc48b64167 Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Fri, 20 Dec 2019 16:30:25 -0800 Subject: [PATCH 09/30] Fix version_gt to work with kubernetes prefix Signed-off-by: Grant Griffiths --- prow.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index c160f886c..832da1ee5 100755 --- a/prow.sh +++ b/prow.sh @@ -1013,6 +1013,7 @@ make_test_to_junit () { # The follow substrings are stripped before version comparison: # - "v" # - "release-" +# - "kubernetes-" # # Usage: # version_gt release-1.3 v1.2.0 (returns true) @@ -1022,8 +1023,9 @@ make_test_to_junit () { # version_gt 1.1.1 release-1.2.0 (returns false) # version_gt 1.2.0 1.2.2 (returns false) function version_gt() { - versions=$(for ver in "$@"; do ver=${ver#release-}; echo "${ver#v}"; done) + versions=$(for ver in "$@"; do ver=${ver#release-}; ver=${ver#kubernetes-}; echo "${ver#v}"; done) greaterVersion=${1#"release-"}; + greaterVersion=${greaterVersion#"kubernetes-"}; greaterVersion=${greaterVersion#"v"}; test "$(printf '%s' "$versions" | sort -V | head -n 1)" != "$greaterVersion" } From af9549b5a11c4849ff7b4a1a42a7b8aea59137c3 Mon Sep 17 00:00:00 2001 From: saad-ali Date: Thu, 2 Jan 2020 14:29:40 -0800 Subject: [PATCH 10/30] Update prow hostpath driver version to 1.3.0-rc2 --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 832da1ee5..ae51947de 100755 --- a/prow.sh +++ b/prow.sh @@ -187,7 +187,7 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # # When no deploy script is found (nothing in `deploy` directory, # CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. 
-configvar CSI_PROW_HOSTPATH_VERSION "v1.2.0" "hostpath driver" +configvar CSI_PROW_HOSTPATH_VERSION "v1.3.0-rc2" "hostpath driver" configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" configvar CSI_PROW_DEPLOYMENT "" "deployment" configvar CSI_PROW_HOSTPATH_DRIVER_NAME "hostpath.csi.k8s.io" "the hostpath driver name" From 8b0316c7e4d94a3206cdab0ea2e8d7f7b3be637b Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Thu, 2 Jan 2020 14:33:46 -0800 Subject: [PATCH 11/30] Fix overriding of junit results by using unique names for each e2e run --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index ae51947de..e21a7ba9e 100755 --- a/prow.sh +++ b/prow.sh @@ -1123,7 +1123,7 @@ main () { # Run tests that are feature tagged, but non-alpha # Ignore: Double quote to prevent globbing and word splitting. # shellcheck disable=SC2086 - if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ + if ! run_e2e parallel-features ${CSI_PROW_GINKO_PARALLEL} \ -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_FOCUS}"))" \ -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}")"; then warn "E2E parallel features failed" From 8191eab6ffe694d11bf1e0d254330287c961dc77 Mon Sep 17 00:00:00 2001 From: Grant Griffiths Date: Tue, 7 Jan 2020 18:09:31 -0800 Subject: [PATCH 12/30] Update snapshotter to version v2.0.0 Signed-off-by: Grant Griffiths --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index e21a7ba9e..eb451ef1b 100755 --- a/prow.sh +++ b/prow.sh @@ -328,7 +328,7 @@ configvar CSI_PROW_E2E_ALPHA_GATES_LATEST '' "alpha feature gates for latest Kub configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" # Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment -configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0-rc4' "external-snapshotter version tag" +configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0' "external-snapshotter version tag" # Some tests are known to be unusable in a KinD cluster. For example, # stopping kubelet with "ssh systemctl stop kubelet" simply From 6582f2ff3bd584e662035b7da2a90efc52184b1b Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Thu, 9 Jan 2020 17:25:38 -0800 Subject: [PATCH 13/30] Update hostpath driver version to get fix for connection-timeout --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index e21a7ba9e..27309a796 100755 --- a/prow.sh +++ b/prow.sh @@ -187,7 +187,7 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # # When no deploy script is found (nothing in `deploy` directory, # CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. 
-configvar CSI_PROW_HOSTPATH_VERSION "v1.3.0-rc2" "hostpath driver" +configvar CSI_PROW_HOSTPATH_VERSION "v1.3.0-rc3" "hostpath driver" configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" configvar CSI_PROW_DEPLOYMENT "" "deployment" configvar CSI_PROW_HOSTPATH_DRIVER_NAME "hostpath.csi.k8s.io" "the hostpath driver name" From ac8a0212b93cad817689029be503839ad1959e21 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Wed, 11 Dec 2019 17:56:08 -0800 Subject: [PATCH 14/30] Document the process for releasing a new sidecar --- SIDECAR_RELEASE_PROCESS.md | 90 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 SIDECAR_RELEASE_PROCESS.md diff --git a/SIDECAR_RELEASE_PROCESS.md b/SIDECAR_RELEASE_PROCESS.md new file mode 100644 index 000000000..91a1e1b82 --- /dev/null +++ b/SIDECAR_RELEASE_PROCESS.md @@ -0,0 +1,90 @@ +# Sidecar Release Process + +This page describes the process for releasing a kubernetes-csi sidecar. + +## Prerequisites + +The release manager must: + +* Be a member of the kubernetes-csi organization. Open an + [issue](https://github.com/kubernetes/org/issues/new?assignees=&labels=area%2Fgithub-membership&template=membership.md&title=REQUEST%3A+New+membership+for+%3Cyour-GH-handle%3E) in + kubernetes/org to request membership +* Be a top level approver for the repository. To become a top level approver, + the candidate must demonstrate ownership and deep knowledge of the repository + through active maintainence, responding to and fixing issues, reviewing PRs, + test triage. +* Be part of the maintainers or admin group for the repository. admin is a + superset of maintainers, only maintainers level is required for cutting a + release. Membership can be requested by submitting a PR to kubernetes/org. + [Example](https://github.com/kubernetes/org/pull/1467) + +## Updating CI Jobs +Whenever a new Kubernetes minor version is released, our kubernetes-csi CI jobs +must be updated. + +[Our CI jobs](https://k8s-testgrid.appspot.com/sig-storage-csi-ci) have the +naming convention `-on-`. + +1. Jobs should be actively monitored to find and fix failures in sidecars and + infrastructure changes early in the development cycle. Test failures are sent + to kubernetes-sig-storage-test-failures@googlegroups.com. +1. "-on-master" jobs are the closest reflection to the new Kubernetes version. +1. Fixes to our prow.sh CI script can be tested in the [CSI hostpath + repo](https://github.com/kubernetes-csi/csi-driver-host-path) by modifying + [prow.sh](https://github.com/kubernetes-csi/csi-driver-host-path/blob/master/release-tools/prow.sh) + along with any overrides in + [.prow.sh](https://github.com/kubernetes-csi/csi-driver-host-path/blob/master/.prow.sh) + to mirror the failing environment. Once e2e tests are passing (verify-unit tests + will fail), then the prow.sh changes can be submitted to [csi-release-tools](https://github.com/kubernetes-csi/csi-release-tools). +1. Changes can then be updated in all the sidecar repos and hostpath driver repo + by following the [update + instructions](https://github.com/kubernetes-csi/csi-release-tools/blob/master/README.md#sharing-and-updating). +1. New pull and CI jobs are configured by + [gen-jobs.sh](https://github.com/kubernetes/test-infra/blob/master/config/jobs/kubernetes-csi/gen-jobs.sh). + New pull jobs that have been unverified should be initially made optional. + [Example](https://github.com/kubernetes/test-infra/pull/15055) +1. 
Once new pull and CI jobs have been verified, and the new Kubernetes version + is released, we can make the optional jobs required, and also remove the + Kubernetes versions that are no longer supported. + +## Release Process +1. Identify all issues and ongoing PRs that should go into the release, and + drive them to resolution. +1. Download [K8s release notes + generator](https://github.com/kubernetes/release/tree/master/cmd/release-notes) +1. Generate release notes for the release. Replace arguments with the relevant + information. + ``` + GITHUB_TOKEN= ./release-notes --start-sha=0ed6978fd199e3ca10326b82b4b8b8e916211c9b --end-sha=3cb3d2f18ed8cb40371c6d8886edcabd1f27e7b9 \ + --github-org=kubernetes-csi --github-repo=external-attacher -branch=master -output out.md + ``` + * `--start-sha` should point to the last release from the same branch. For + example: + * `1.X-1.0` tag when releasing `1.X.0` + * `1.X.Y-1` tag when releasing `1.X.Y` +1. Compare the generated output to the new commits for the release to check if + any notable change missed a release note. +1. Reword release notes as needed. Make sure to check notes for breaking + changes and deprecations. +1. If release is a new major/minor version, create a new `CHANGELOG-..md` + file. Otherwise, add the release notes to the top of the existing CHANGELOG + file for that minor version. +1. Submit a PR for the CHANGELOG changes. +1. Submit a PR for README changes, in particular, Compatibility, Feature status, + and any other sections that may need updating. +1. Check that all [canary CI + jobs](https://k8s-testgrid.appspot.com/sig-storage-csi-ci) are passing, + and that test coverage is adequate for the changes that are going into the release. +1. Make sure that no new PRs have merged in the meantime, and no PRs are in + flight and soon to be merged. +1. Create a new release following a previous release as a template. Be sure to select the correct + branch. This requires Github release permissions as required by the prerequisites. + [external-provisioner example](https://github.com/kubernetes-csi/external-provisioner/releases/new) +1. If release was a new major/minor version, create a new `release-` + branch at that commit. +1. Update [kubernetes-csi/docs](https://github.com/kubernetes-csi/docs) sidecar + and feature pages with the new released version. +1. 
After all the sidecars have been released, update + CSI hostpath driver with the new sidecars in the [CSI repo](https://github.com/kubernetes-csi/csi-driver-host-path/tree/master/deploy) + and [k/k + in-tree](https://github.com/kubernetes/kubernetes/tree/master/test/e2e/testing-manifests/storage-csi/hostpath/hostpath) From fa90abd07862e6fa0a7b66ea2435b9db364ce6c9 Mon Sep 17 00:00:00 2001 From: wangzheng03 Date: Sun, 19 Jan 2020 09:42:26 +0800 Subject: [PATCH 15/30] fix incorrect link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 08c828846..60eab2a98 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,7 @@ on what is enabled in Prow, see https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-csi Test results for periodic jobs are visible in -https://testgrid.k8s.io/sig-storage-csi +https://testgrid.k8s.io/sig-storage-csi-ci It is possible to reproduce the Prow testing locally on a suitable machine: - Linux host From 17905432e4eca8f5ad89a12f0b1363fa0b635cc9 Mon Sep 17 00:00:00 2001 From: wangzheng03 Date: Sun, 19 Jan 2020 17:25:17 +0800 Subject: [PATCH 16/30] fix incorrect link --- doc/development.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/development.md b/doc/development.md index d162f0530..2914313a7 100644 --- a/doc/development.md +++ b/doc/development.md @@ -8,7 +8,7 @@ csi-attacher -kubeconfig ~/.kube/config -v 5 -csi-address /run/csi/socket ## Implementation details -The external-attacher follows [controller](https://github.com/kubernetes/community/blob/master/contributors/devel/controllers.md) pattern and uses informers to watch for `VolumeAttachment` and `PersistentVolume` create/update/delete events. It filters out `VolumeAttachment` instances with `Attacher==` and processes these events in workqueues with exponential backoff. Real handling is deferred to `Handler` interface. +The external-attacher follows [controller](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/controllers.md) pattern and uses informers to watch for `VolumeAttachment` and `PersistentVolume` create/update/delete events. It filters out `VolumeAttachment` instances with `Attacher==` and processes these events in workqueues with exponential backoff. Real handling is deferred to `Handler` interface. `Handler` interface has two implementations, trivial and real one. From 7df1f238ed1a3e71ddb143e19dadb56e76b18ed9 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Tue, 21 Jan 2020 12:02:09 +0530 Subject: [PATCH 17/30] Remove unused leaderElectionTypeConfigMaps const this is a left over const value from the leaderelection type cleanup. 
Signed-off-by: Madhu Rajanna --- cmd/csi-attacher/main.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/csi-attacher/main.go b/cmd/csi-attacher/main.go index 98fba078f..ce5b1a4d7 100644 --- a/cmd/csi-attacher/main.go +++ b/cmd/csi-attacher/main.go @@ -46,8 +46,7 @@ const ( // Default timeout of short CSI calls like GetPluginInfo csiTimeout = time.Second - leaderElectionTypeLeases = "leases" - leaderElectionTypeConfigMaps = "configmaps" + leaderElectionTypeLeases = "leases" ) // Command line flags From 22020655be584cac3e5d8b1cf6c20d926eb592dd Mon Sep 17 00:00:00 2001 From: Alex Szakaly Date: Sat, 25 Jan 2020 14:07:06 +0100 Subject: [PATCH 18/30] fix: update Go 1.13 Previous commit (5e773d2d) updates CI but go.mod should follow it Signed-off-by: Alex Szakaly --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index fb949a672..0df06e0ce 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubernetes-csi/external-attacher -go 1.12 +go 1.13 require ( github.com/container-storage-interface/spec v1.2.0 From 8e9aba01a2f5515b69d2a9e1c921863c246721e8 Mon Sep 17 00:00:00 2001 From: Alex Szakaly Date: Sat, 25 Jan 2020 14:07:11 +0100 Subject: [PATCH 19/30] fix: go module requirements for semantic versioning Fixes issue #208. Signed-off-by: Alex Szakaly --- cmd/csi-attacher/main.go | 4 ++-- go.mod | 2 +- pkg/controller/csi_handler.go | 2 +- pkg/controller/csi_handler_test.go | 2 +- pkg/controller/framework_test.go | 2 +- pkg/controller/trivial_handler_test.go | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/csi-attacher/main.go b/cmd/csi-attacher/main.go index ce5b1a4d7..1a5c69c3e 100644 --- a/cmd/csi-attacher/main.go +++ b/cmd/csi-attacher/main.go @@ -36,8 +36,8 @@ import ( "github.com/kubernetes-csi/csi-lib-utils/leaderelection" "github.com/kubernetes-csi/csi-lib-utils/metrics" "github.com/kubernetes-csi/csi-lib-utils/rpc" - "github.com/kubernetes-csi/external-attacher/pkg/attacher" - "github.com/kubernetes-csi/external-attacher/pkg/controller" + "github.com/kubernetes-csi/external-attacher/v2/pkg/attacher" + "github.com/kubernetes-csi/external-attacher/v2/pkg/controller" "google.golang.org/grpc" ) diff --git a/go.mod b/go.mod index 0df06e0ce..5e1d74366 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/kubernetes-csi/external-attacher +module github.com/kubernetes-csi/external-attacher/v2 go 1.13 diff --git a/pkg/controller/csi_handler.go b/pkg/controller/csi_handler.go index 6e93a1952..2fc88d5d8 100644 --- a/pkg/controller/csi_handler.go +++ b/pkg/controller/csi_handler.go @@ -25,7 +25,7 @@ import ( "k8s.io/klog" - "github.com/kubernetes-csi/external-attacher/pkg/attacher" + "github.com/kubernetes-csi/external-attacher/v2/pkg/attacher" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/controller/csi_handler_test.go b/pkg/controller/csi_handler_test.go index 00461f2c9..8f2d06ee7 100644 --- a/pkg/controller/csi_handler_test.go +++ b/pkg/controller/csi_handler_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/kubernetes-csi/external-attacher/pkg/attacher" + "github.com/kubernetes-csi/external-attacher/v2/pkg/attacher" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" diff --git a/pkg/controller/framework_test.go b/pkg/controller/framework_test.go index cc167327c..3fe6aaf65 100644 --- a/pkg/controller/framework_test.go +++ b/pkg/controller/framework_test.go @@ -27,7 +27,7 @@ import ( 
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/davecgh/go-spew/spew" - "github.com/kubernetes-csi/external-attacher/pkg/attacher" + "github.com/kubernetes-csi/external-attacher/v2/pkg/attacher" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" diff --git a/pkg/controller/trivial_handler_test.go b/pkg/controller/trivial_handler_test.go index f21f2e05e..dfdcfeecd 100644 --- a/pkg/controller/trivial_handler_test.go +++ b/pkg/controller/trivial_handler_test.go @@ -20,7 +20,7 @@ import ( "errors" "testing" - "github.com/kubernetes-csi/external-attacher/pkg/attacher" + "github.com/kubernetes-csi/external-attacher/v2/pkg/attacher" storage "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" From 7e9187cd448748ea87203aa06808b51878c469c0 Mon Sep 17 00:00:00 2001 From: Alex Szakaly Date: Mon, 27 Jan 2020 22:16:52 +0100 Subject: [PATCH 20/30] feat: add compatibility with dep This commit adds the compatibility with dep (Go dependency management tool) via symlink creation (same method applied in kubernetes-csi/csi-test) Signed-off-by: Alex Szakaly --- v2/README.md | 18 ++++++++++++++++++ v2/cmd/csi-attacher/main.go | 1 + v2/pkg/attacher/attacher.go | 1 + v2/pkg/attacher/attacher_test.go | 1 + v2/pkg/attacher/lister.go | 1 + v2/pkg/controller/controller.go | 1 + v2/pkg/controller/controller_test.go | 1 + v2/pkg/controller/csi_handler.go | 1 + v2/pkg/controller/csi_handler_test.go | 1 + v2/pkg/controller/framework_test.go | 1 + v2/pkg/controller/trivial_handler.go | 1 + v2/pkg/controller/trivial_handler_test.go | 1 + v2/pkg/controller/util.go | 1 + v2/pkg/controller/util_test.go | 1 + 14 files changed, 31 insertions(+) create mode 100644 v2/README.md create mode 120000 v2/cmd/csi-attacher/main.go create mode 120000 v2/pkg/attacher/attacher.go create mode 120000 v2/pkg/attacher/attacher_test.go create mode 120000 v2/pkg/attacher/lister.go create mode 120000 v2/pkg/controller/controller.go create mode 120000 v2/pkg/controller/controller_test.go create mode 120000 v2/pkg/controller/csi_handler.go create mode 120000 v2/pkg/controller/csi_handler_test.go create mode 120000 v2/pkg/controller/framework_test.go create mode 120000 v2/pkg/controller/trivial_handler.go create mode 120000 v2/pkg/controller/trivial_handler_test.go create mode 120000 v2/pkg/controller/util.go create mode 120000 v2/pkg/controller/util_test.go diff --git a/v2/README.md b/v2/README.md new file mode 100644 index 000000000..ebfe2544b --- /dev/null +++ b/v2/README.md @@ -0,0 +1,18 @@ +This directory mirrors the source code via symlinks. +This makes it possible to vendor v2.x releases of +external-attacher with `dep` versions that do not +support semantic imports. Support for that is currently +[pending in dep](https://github.com/golang/dep/pull/1963). 
+ +If users of dep have enabled pruning, they must disable it +for external-attacher in their Gopkg.toml, like this: + +```toml +[prune] + go-tests = true + unused-packages = true + + [[prune.project]] + name = "github.com/kubernetes-csi/external-attacher" + unused-packages = false +``` diff --git a/v2/cmd/csi-attacher/main.go b/v2/cmd/csi-attacher/main.go new file mode 120000 index 000000000..8b0581700 --- /dev/null +++ b/v2/cmd/csi-attacher/main.go @@ -0,0 +1 @@ +../../../cmd/csi-attacher/main.go \ No newline at end of file diff --git a/v2/pkg/attacher/attacher.go b/v2/pkg/attacher/attacher.go new file mode 120000 index 000000000..41bfec0b9 --- /dev/null +++ b/v2/pkg/attacher/attacher.go @@ -0,0 +1 @@ +../../../pkg/attacher/attacher.go \ No newline at end of file diff --git a/v2/pkg/attacher/attacher_test.go b/v2/pkg/attacher/attacher_test.go new file mode 120000 index 000000000..f03251c04 --- /dev/null +++ b/v2/pkg/attacher/attacher_test.go @@ -0,0 +1 @@ +../../../pkg/attacher/attacher_test.go \ No newline at end of file diff --git a/v2/pkg/attacher/lister.go b/v2/pkg/attacher/lister.go new file mode 120000 index 000000000..c7d85ed38 --- /dev/null +++ b/v2/pkg/attacher/lister.go @@ -0,0 +1 @@ +../../../pkg/attacher/lister.go \ No newline at end of file diff --git a/v2/pkg/controller/controller.go b/v2/pkg/controller/controller.go new file mode 120000 index 000000000..29bb71b72 --- /dev/null +++ b/v2/pkg/controller/controller.go @@ -0,0 +1 @@ +../../../pkg/controller/controller.go \ No newline at end of file diff --git a/v2/pkg/controller/controller_test.go b/v2/pkg/controller/controller_test.go new file mode 120000 index 000000000..26f818ca0 --- /dev/null +++ b/v2/pkg/controller/controller_test.go @@ -0,0 +1 @@ +../../../pkg/controller/controller_test.go \ No newline at end of file diff --git a/v2/pkg/controller/csi_handler.go b/v2/pkg/controller/csi_handler.go new file mode 120000 index 000000000..b0847b473 --- /dev/null +++ b/v2/pkg/controller/csi_handler.go @@ -0,0 +1 @@ +../../../pkg/controller/csi_handler.go \ No newline at end of file diff --git a/v2/pkg/controller/csi_handler_test.go b/v2/pkg/controller/csi_handler_test.go new file mode 120000 index 000000000..caca87dda --- /dev/null +++ b/v2/pkg/controller/csi_handler_test.go @@ -0,0 +1 @@ +../../../pkg/controller/csi_handler_test.go \ No newline at end of file diff --git a/v2/pkg/controller/framework_test.go b/v2/pkg/controller/framework_test.go new file mode 120000 index 000000000..5494f3186 --- /dev/null +++ b/v2/pkg/controller/framework_test.go @@ -0,0 +1 @@ +../../../pkg/controller/framework_test.go \ No newline at end of file diff --git a/v2/pkg/controller/trivial_handler.go b/v2/pkg/controller/trivial_handler.go new file mode 120000 index 000000000..ac11b861a --- /dev/null +++ b/v2/pkg/controller/trivial_handler.go @@ -0,0 +1 @@ +../../../pkg/controller/trivial_handler.go \ No newline at end of file diff --git a/v2/pkg/controller/trivial_handler_test.go b/v2/pkg/controller/trivial_handler_test.go new file mode 120000 index 000000000..32960d0f1 --- /dev/null +++ b/v2/pkg/controller/trivial_handler_test.go @@ -0,0 +1 @@ +../../../pkg/controller/trivial_handler_test.go \ No newline at end of file diff --git a/v2/pkg/controller/util.go b/v2/pkg/controller/util.go new file mode 120000 index 000000000..1d7316a35 --- /dev/null +++ b/v2/pkg/controller/util.go @@ -0,0 +1 @@ +../../../pkg/controller/util.go \ No newline at end of file diff --git a/v2/pkg/controller/util_test.go b/v2/pkg/controller/util_test.go new file mode 120000 
index 000000000..3003fba62 --- /dev/null +++ b/v2/pkg/controller/util_test.go @@ -0,0 +1 @@ +../../../pkg/controller/util_test.go \ No newline at end of file From 1c61d6a7affe7fe8cbc2a2c90976d3e1bdc37ffb Mon Sep 17 00:00:00 2001 From: Danil-Grigorev Date: Fri, 31 Jan 2020 19:07:59 +0100 Subject: [PATCH 21/30] Removed dependency on node annotation in favor of CSINode resource --- pkg/controller/csi_handler.go | 15 ++---- pkg/controller/csi_handler_test.go | 84 +++++++++++------------------- pkg/controller/util.go | 24 +-------- pkg/controller/util_test.go | 67 +----------------------- 4 files changed, 39 insertions(+), 151 deletions(-) diff --git a/pkg/controller/csi_handler.go b/pkg/controller/csi_handler.go index 2fc88d5d8..08ba1c261 100644 --- a/pkg/controller/csi_handler.go +++ b/pkg/controller/csi_handler.go @@ -692,19 +692,14 @@ func (h *csiHandler) getNodeID(driver string, nodeName string, va *storage.Volum klog.V(4).Infof("Found NodeID %s in CSINode %s", nodeID, nodeName) return nodeID, nil } - klog.V(4).Infof("CSINode %s does not contain driver %s", nodeName, driver) // CSINode exists, but does not have the requested driver. - // Fall through to Node annotation. - } else { - // Can't get CSINode, fall through to Node annotation. - klog.V(4).Infof("Can't get CSINode %s: %s", nodeName, err) + errMessage := fmt.Sprintf("CSINode %s does not contain driver %s", nodeName, driver) + klog.V(4).Info(errMessage) + return "", errors.New(errMessage) } - // Check Node annotation. - node, err := h.nodeLister.Get(nodeName) - if err == nil { - return GetNodeIDFromNode(driver, node) - } + // Can't get CSINode, check Volume Attachment. + klog.V(4).Infof("Can't get CSINode %s: %s", nodeName, err) // Check VolumeAttachment annotation as the last resort if caller wants so (i.e. has provided one). 
if va == nil { diff --git a/pkg/controller/csi_handler_test.go b/pkg/controller/csi_handler_test.go index 8f2d06ee7..3e2c44cec 100644 --- a/pkg/controller/csi_handler_test.go +++ b/pkg/controller/csi_handler_test.go @@ -194,19 +194,10 @@ func node() *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: testNodeName, - Annotations: map[string]string{ - "csi.volume.kubernetes.io/nodeid": fmt.Sprintf("{ %q: %q }", testAttacherName, testNodeID), - }, }, } } -func nodeWithoutAnnotations() *v1.Node { - n := node() - n.Annotations = nil - return n -} - func csiNode() *storage.CSINode { return &storage.CSINode{ ObjectMeta: metav1.ObjectMeta{ @@ -295,7 +286,7 @@ func TestCSIHandler(t *testing.T) { // { name: "VolumeAttachment added -> successful attachment", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first @@ -312,7 +303,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec -> successful attachment", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{node(), csiNode()}, addedVA: vaWithInlineSpec(va(false /*attached*/, "" /*finalizer*/, nil /* annotations */)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -328,7 +319,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "readOnly VolumeAttachment added -> successful attachment", - initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), node()}, + initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), node(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first @@ -345,7 +336,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "readOnly VolumeAttachment with InlineVolumeSpec -> successful attachment", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{node(), csiNode()}, addedVA: vaInlineSpecReadOnly(vaWithInlineSpec(va(false /*attached*/, "" /*finalizer*/, nil /* annotations */))), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -361,7 +352,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment updated -> successful attachment", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -378,7 +369,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec updated -> successful attachment", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{node(), csiNode()}, updatedVA: vaWithInlineSpec(va(false /*attached*/, "" /*finalizer*/, nil /* annotations */)), expectedActions: []core.Action{ // Finalizer is saved first @@ -395,7 +386,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with attributes -> successful attachment", - initialObjects: []runtime.Object{pvWithAttributes(pvWithFinalizer(), map[string]string{"foo": "bar"}), node()}, + initialObjects: []runtime.Object{pvWithAttributes(pvWithFinalizer(), map[string]string{"foo": "bar"}), node(), csiNode()}, updatedVA: va(false, "", nil), 
expectedActions: []core.Action{ // Finalizer is saved first @@ -412,7 +403,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec and attributes -> successful attachment", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{node(), csiNode()}, updatedVA: vaInlineSpecWithAttributes(vaWithInlineSpec(va(false, "", nil)) /*va*/, map[string]string{"foo": "bar"} /*attributes*/), expectedActions: []core.Action{ // Finalizer is saved first @@ -429,7 +420,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with secrets -> successful attachment", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "secret"), node(), secret()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "secret"), node(), secret(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "secret"), @@ -447,7 +438,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec and secrets -> successful attachment", - initialObjects: []runtime.Object{node(), secret()}, + initialObjects: []runtime.Object{node(), secret(), csiNode()}, updatedVA: vaInlineSpecWithSecret(vaWithInlineSpec(va(false, "", nil)) /*va*/, "secret" /*secret*/), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "secret"), @@ -465,7 +456,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with empty secrets -> successful attachment", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "emptySecret"), node(), emptySecret()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "emptySecret"), node(), emptySecret(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "emptySecret"), @@ -483,7 +474,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec and empty secrets -> successful attachment", - initialObjects: []runtime.Object{node(), emptySecret()}, + initialObjects: []runtime.Object{node(), emptySecret(), csiNode()}, updatedVA: vaInlineSpecWithSecret(vaWithInlineSpec(va(false, "", nil)) /*va*/, "emptySecret" /*secret*/), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "emptySecret"), @@ -513,7 +504,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment updated -> PV finalizer is added", - initialObjects: []runtime.Object{pv(), node()}, + initialObjects: []runtime.Object{pv(), node(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ // PV Finalizer after VA @@ -534,7 +525,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "error saving PV finalizer -> controller retries", - initialObjects: []runtime.Object{pv(), node()}, + initialObjects: []runtime.Object{pv(), node(), csiNode()}, updatedVA: va(false, "", nil), reactors: []reaction{ { @@ -604,7 +595,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment added -> successful attachment incl. 
metadata", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -695,12 +686,12 @@ func TestCSIHandler(t *testing.T) { expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, types.MergePatchType, patch(va(false, fin, ann), vaWithAttachError(va(false, fin, ann), - "node \"node1\" not found"))), + "csinode.storage.k8s.io \"node1\" not found"))), }, }, { name: "failed write with VA finializers -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false, "", nil), reactors: []reaction{ { @@ -744,7 +735,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "failed write with attached=true -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false, "", nil), reactors: []reaction{ { @@ -788,7 +779,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSI attach fails -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -815,7 +806,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSI attach times out -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -832,28 +823,28 @@ func TestCSIHandler(t *testing.T) { }, }, { - name: "Node without annotations -> error", - initialObjects: []runtime.Object{pvWithFinalizer(), nodeWithoutAnnotations()}, + name: "Node without CSINode -> error", + initialObjects: []runtime.Object{pvWithFinalizer(), node()}, addedVA: va(false, fin, ann), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, types.MergePatchType, patch(va(false /*attached*/, fin, ann), - vaWithAttachError(va(false, fin, ann), "node \"node1\" has no NodeID annotation"))), + vaWithAttachError(va(false, fin, ann), "csinode.storage.k8s.io \"node1\" not found"))), }, }, { - name: "CSINode exists without the driver, Node without annotations -> error", - initialObjects: []runtime.Object{pvWithFinalizer(), nodeWithoutAnnotations(), csiNodeEmpty()}, + name: "CSINode exists without the driver -> error", + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNodeEmpty()}, addedVA: va(false, fin, ann), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, types.MergePatchType, patch(va(false /*attached*/, fin, ann), - vaWithAttachError(va(false, fin, ann), "node \"node1\" has no NodeID annotation"))), + vaWithAttachError(va(false, fin, ann), "CSINode node1 does not contain driver csi/test"))), }, }, { name: "CSINode exists with the driver, Node without annotations -> success", - initialObjects: []runtime.Object{pvWithFinalizer(), nodeWithoutAnnotations(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, 
nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -870,7 +861,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with GCEPersistentDiskVolumeSource -> successful attachment", - initialObjects: []runtime.Object{gcePDPVWithFinalizer(), node()}, + initialObjects: []runtime.Object{gcePDPVWithFinalizer(), node(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -1095,7 +1086,7 @@ func TestCSIHandler(t *testing.T) { expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, types.MergePatchType, patch(deleted(va(true, fin, nil)), - deleted(vaWithDetachError(va(true, fin, nil), "node \"node1\" not found")))), + deleted(vaWithDetachError(va(true, fin, nil), "csinode.storage.k8s.io \"node1\" not found")))), }, }, { @@ -1112,19 +1103,6 @@ func TestCSIHandler(t *testing.T) { {"detach", testVolumeHandle, "annotatedNodeID", noAttrs, noSecrets, readWrite, success, detached, noMetadata, 0}, }, }, - { - name: "VolumeAttachment marked for deletion -> node is preferred over VA annotation for NodeID", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, - addedVA: deleted(va(true, fin, map[string]string{vaNodeIDAnnotation: "annotatedNodeID"})), - expectedActions: []core.Action{ - core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, - types.MergePatchType, patch(deleted(va(true, fin, map[string]string{vaNodeIDAnnotation: "annotatedNodeID"})), - deleted(va(false /*attached*/, "", map[string]string{vaNodeIDAnnotation: "annotatedNodeID"})))), - }, - expectedCSICalls: []csiCall{ - {"detach", testVolumeHandle, testNodeID, noAttrs, noSecrets, readWrite, success, detached, noMetadata, 0}, - }, - }, { name: "failed write with attached=false -> controller retries", initialObjects: []runtime.Object{pvWithFinalizer(), node()}, @@ -1354,7 +1332,7 @@ func TestCSIHandlerReadOnly(t *testing.T) { // { name: "read-write PV -> attached as read-write", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first @@ -1371,7 +1349,7 @@ func TestCSIHandlerReadOnly(t *testing.T) { }, { name: "read-only PV -> attached as read-write", - initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), node()}, + initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), node(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first diff --git a/pkg/controller/util.go b/pkg/controller/util.go index e1e86f429..f65420d93 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -23,8 +23,8 @@ import ( "regexp" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/evanphx/json-patch" - "k8s.io/api/core/v1" + jsonpatch "github.com/evanphx/json-patch" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" @@ -93,7 +93,6 @@ func markAsDetached(client kubernetes.Interface, va *storage.VolumeAttachment) ( const ( defaultFSType = "ext4" - nodeIDAnnotation = "csi.volume.kubernetes.io/nodeid" csiVolAttribsAnnotationKey = "csi.volume.kubernetes.io/volume-attributes" vaNodeIDAnnotation = 
"csi.alpha.kubernetes.io/node-id" ) @@ -114,25 +113,6 @@ func GetFinalizerName(driver string) string { return "external-attacher/" + SanitizeDriverName(driver) } -// GetNodeIDFromNode returns nodeID string from node annotations. -func GetNodeIDFromNode(driver string, node *v1.Node) (string, error) { - nodeIDJSON, ok := node.Annotations[nodeIDAnnotation] - if !ok { - return "", fmt.Errorf("node %q has no NodeID annotation", node.Name) - } - - var nodeIDs map[string]string - if err := json.Unmarshal([]byte(nodeIDJSON), &nodeIDs); err != nil { - return "", fmt.Errorf("cannot parse NodeID annotation on node %q: %s", node.Name, err) - } - nodeID, ok := nodeIDs[driver] - if !ok { - return "", fmt.Errorf("cannot find NodeID for driver %q for node %q", driver, node.Name) - } - - return nodeID, nil -} - // GetNodeIDFromCSINode returns nodeID from CSIDriverInfoSpec func GetNodeIDFromCSINode(driver string, csiNode *storage.CSINode) (string, bool) { for _, d := range csiNode.Spec.Drivers { diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index f2b6a9bb4..321c836f8 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -20,78 +20,13 @@ import ( "testing" "github.com/container-storage-interface/spec/lib/go/csi" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" ) const ( driverName = "foo/bar" ) -func TestGetNodeIDFromNode(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - expectedID string - expectError bool - }{ - { - name: "single key", - annotations: map[string]string{"csi.volume.kubernetes.io/nodeid": "{\"foo/bar\": \"MyNodeID\"}"}, - expectedID: "MyNodeID", - expectError: false, - }, - { - name: "multiple keys", - annotations: map[string]string{ - "csi.volume.kubernetes.io/nodeid": "{\"foo/bar\": \"MyNodeID\", \"-foo/bar\": \"MyNodeID2\", \"foo/bar-\": \"MyNodeID3\"}", - }, - expectedID: "MyNodeID", - expectError: false, - }, - { - name: "no annotations", - annotations: nil, - expectedID: "", - expectError: true, - }, - { - name: "invalid JSON", - annotations: map[string]string{"csi.volume.kubernetes.io/nodeid": "\"foo/bar\": \"MyNodeID\""}, - expectedID: "", - expectError: true, - }, - { - name: "annotations for another driver", - annotations: map[string]string{ - "csi.volume.kubernetes.io/nodeid": "{\"-foo/bar\": \"MyNodeID2\", \"foo/bar-\": \"MyNodeID3\"}", - }, - expectedID: "", - expectError: true, - }, - } - - for _, test := range tests { - node := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "abc", - Annotations: test.annotations, - }, - } - nodeID, err := GetNodeIDFromNode(driverName, node) - - if err == nil && test.expectError { - t.Errorf("test %s: expected error, got none", test.name) - } - if err != nil && !test.expectError { - t.Errorf("test %s: got error: %s", test.name, err) - } - if !test.expectError && nodeID != test.expectedID { - t.Errorf("test %s: unexpected NodeID: %s", test.name, nodeID) - } - } -} - func createBlockCapability(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability { return &csi.VolumeCapability{ AccessType: &csi.VolumeCapability_Block{ From 8847215132c6a989ecef1e00a9de0886c6223b78 Mon Sep 17 00:00:00 2001 From: Danil-Grigorev Date: Mon, 3 Feb 2020 13:21:12 +0100 Subject: [PATCH 22/30] Removed RBAC for Nodes, updated tests --- cmd/csi-attacher/main.go | 3 +- deploy/kubernetes/rbac.yaml | 5 +- pkg/controller/csi_handler.go | 7 +- pkg/controller/csi_handler_test.go | 116 ++++++++++++++++------------- 4 files changed, 70 
insertions(+), 61 deletions(-) diff --git a/cmd/csi-attacher/main.go b/cmd/csi-attacher/main.go index 1a5c69c3e..44c22e6f6 100644 --- a/cmd/csi-attacher/main.go +++ b/cmd/csi-attacher/main.go @@ -154,12 +154,11 @@ func main() { } if supportsAttach { pvLister := factory.Core().V1().PersistentVolumes().Lister() - nodeLister := factory.Core().V1().Nodes().Lister() vaLister := factory.Storage().V1beta1().VolumeAttachments().Lister() csiNodeLister := factory.Storage().V1beta1().CSINodes().Lister() volAttacher := attacher.NewAttacher(csiConn) CSIVolumeLister := attacher.NewVolumeLister(csiConn) - handler = controller.NewCSIHandler(clientset, csiAttacher, volAttacher, CSIVolumeLister, pvLister, nodeLister, csiNodeLister, vaLister, timeout, supportsReadOnly, csitrans.New()) + handler = controller.NewCSIHandler(clientset, csiAttacher, volAttacher, CSIVolumeLister, pvLister, csiNodeLister, vaLister, timeout, supportsReadOnly, csitrans.New()) klog.V(2).Infof("CSI driver supports ControllerPublishUnpublish, using real CSI handler") } else { handler = controller.NewTrivialHandler(clientset) diff --git a/deploy/kubernetes/rbac.yaml b/deploy/kubernetes/rbac.yaml index f5d97a117..93ce31962 100644 --- a/deploy/kubernetes/rbac.yaml +++ b/deploy/kubernetes/rbac.yaml @@ -16,7 +16,7 @@ metadata: namespace: default --- -# Attacher must be able to work with PVs, nodes and VolumeAttachments +# Attacher must be able to work with PVs, CSINodes and VolumeAttachments kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -25,9 +25,6 @@ rules: - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "update", "patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch"] diff --git a/pkg/controller/csi_handler.go b/pkg/controller/csi_handler.go index 08ba1c261..c6c3a259f 100644 --- a/pkg/controller/csi_handler.go +++ b/pkg/controller/csi_handler.go @@ -62,7 +62,6 @@ type csiHandler struct { attacher attacher.Attacher CSIVolumeLister VolumeLister pvLister corelisters.PersistentVolumeLister - nodeLister corelisters.NodeLister csiNodeLister storagelisters.CSINodeLister vaLister storagelisters.VolumeAttachmentLister vaQueue, pvQueue workqueue.RateLimitingInterface @@ -82,7 +81,6 @@ func NewCSIHandler( attacher attacher.Attacher, CSIVolumeLister VolumeLister, pvLister corelisters.PersistentVolumeLister, - nodeLister corelisters.NodeLister, csiNodeLister storagelisters.CSINodeLister, vaLister storagelisters.VolumeAttachmentLister, timeout *time.Duration, @@ -95,7 +93,6 @@ func NewCSIHandler( attacher: attacher, CSIVolumeLister: CSIVolumeLister, pvLister: pvLister, - nodeLister: nodeLister, csiNodeLister: csiNodeLister, vaLister: vaLister, timeout: *timeout, @@ -682,7 +679,7 @@ func (h *csiHandler) getCredentialsFromPV(csiSource *v1.CSIPersistentVolumeSourc return credentials, nil } -// getNodeID finds node ID from Node API object. If caller wants, it can find +// getNodeID finds node ID from CSINode API object. If caller wants, it can find // node ID stored in VolumeAttachment annotation. func (h *csiHandler) getNodeID(driver string, nodeName string, va *storage.VolumeAttachment) (string, error) { // Try to find CSINode first. 
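As background for the CSINode-based lookup above: a minimal, self-contained sketch of what GetNodeIDFromCSINode does. Object names and IDs are illustrative; this is not the attacher's real code path.

    package main

    import (
        "fmt"

        storage "k8s.io/api/storage/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // lookupNodeID mirrors GetNodeIDFromCSINode: scan the drivers registered
    // in a CSINode object and return the node ID recorded for the given driver.
    func lookupNodeID(driver string, csiNode *storage.CSINode) (string, bool) {
        for _, d := range csiNode.Spec.Drivers {
            if d.Name == driver {
                return d.NodeID, true
            }
        }
        return "", false
    }

    func main() {
        // A CSINode as kubelet publishes it once the driver registers on "node1".
        csiNode := &storage.CSINode{
            ObjectMeta: metav1.ObjectMeta{Name: "node1"},
            Spec: storage.CSINodeSpec{
                Drivers: []storage.CSINodeDriver{
                    {Name: "hostpath.csi.k8s.io", NodeID: "example-node-id"},
                },
            },
        }
        if id, ok := lookupNodeID("hostpath.csi.k8s.io", csiNode); ok {
            fmt.Println("node ID:", id) // prints "example-node-id"
        }
    }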
@@ -709,7 +706,7 @@ func (h *csiHandler) getNodeID(driver string, nodeName string, va *storage.Volum return nodeID, nil } - // return nodeLister.Get error + // return csiNodeLister.Get error return "", err } diff --git a/pkg/controller/csi_handler_test.go b/pkg/controller/csi_handler_test.go index 3e2c44cec..d60f9262e 100644 --- a/pkg/controller/csi_handler_test.go +++ b/pkg/controller/csi_handler_test.go @@ -57,7 +57,6 @@ func csiHandlerFactory(client kubernetes.Interface, informerFactory informers.Sh csi, lister, informerFactory.Core().V1().PersistentVolumes().Lister(), - informerFactory.Core().V1().Nodes().Lister(), informerFactory.Storage().V1beta1().CSINodes().Lister(), informerFactory.Storage().V1beta1().VolumeAttachments().Lister(), &timeout, @@ -73,7 +72,6 @@ func csiHandlerFactoryNoReadOnly(client kubernetes.Interface, informerFactory in csi, lister, informerFactory.Core().V1().PersistentVolumes().Lister(), - informerFactory.Core().V1().Nodes().Lister(), informerFactory.Storage().V1beta1().CSINodes().Lister(), informerFactory.Storage().V1beta1().VolumeAttachments().Lister(), &timeout, @@ -198,6 +196,14 @@ func node() *v1.Node { } } +func nodeWithAnnotations() *v1.Node { + node := node() + node.Annotations = map[string]string{ + "csi.volume.kubernetes.io/nodeid": fmt.Sprintf("{ %q: %q }", testAttacherName, testNodeID), + } + return node +} + func csiNode() *storage.CSINode { return &storage.CSINode{ ObjectMeta: metav1.ObjectMeta{ @@ -286,7 +292,7 @@ func TestCSIHandler(t *testing.T) { // { name: "VolumeAttachment added -> successful attachment", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first @@ -303,7 +309,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec -> successful attachment", - initialObjects: []runtime.Object{node(), csiNode()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: vaWithInlineSpec(va(false /*attached*/, "" /*finalizer*/, nil /* annotations */)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -319,7 +325,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "readOnly VolumeAttachment added -> successful attachment", - initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), node(), csiNode()}, + initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first @@ -336,7 +342,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "readOnly VolumeAttachment with InlineVolumeSpec -> successful attachment", - initialObjects: []runtime.Object{node(), csiNode()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: vaInlineSpecReadOnly(vaWithInlineSpec(va(false /*attached*/, "" /*finalizer*/, nil /* annotations */))), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -352,7 +358,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment updated -> successful attachment", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ // 
Finalizer is saved first @@ -369,7 +375,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec updated -> successful attachment", - initialObjects: []runtime.Object{node(), csiNode()}, + initialObjects: []runtime.Object{csiNode()}, updatedVA: vaWithInlineSpec(va(false /*attached*/, "" /*finalizer*/, nil /* annotations */)), expectedActions: []core.Action{ // Finalizer is saved first @@ -386,7 +392,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with attributes -> successful attachment", - initialObjects: []runtime.Object{pvWithAttributes(pvWithFinalizer(), map[string]string{"foo": "bar"}), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithAttributes(pvWithFinalizer(), map[string]string{"foo": "bar"}), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -403,7 +409,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec and attributes -> successful attachment", - initialObjects: []runtime.Object{node(), csiNode()}, + initialObjects: []runtime.Object{csiNode()}, updatedVA: vaInlineSpecWithAttributes(vaWithInlineSpec(va(false, "", nil)) /*va*/, map[string]string{"foo": "bar"} /*attributes*/), expectedActions: []core.Action{ // Finalizer is saved first @@ -420,7 +426,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with secrets -> successful attachment", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "secret"), node(), secret(), csiNode()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "secret"), secret(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "secret"), @@ -438,7 +444,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec and secrets -> successful attachment", - initialObjects: []runtime.Object{node(), secret(), csiNode()}, + initialObjects: []runtime.Object{secret(), csiNode()}, updatedVA: vaInlineSpecWithSecret(vaWithInlineSpec(va(false, "", nil)) /*va*/, "secret" /*secret*/), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "secret"), @@ -456,7 +462,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with empty secrets -> successful attachment", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "emptySecret"), node(), emptySecret(), csiNode()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "emptySecret"), emptySecret(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "emptySecret"), @@ -474,7 +480,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with InlineVolumeSpec and empty secrets -> successful attachment", - initialObjects: []runtime.Object{node(), emptySecret(), csiNode()}, + initialObjects: []runtime.Object{emptySecret(), csiNode()}, updatedVA: vaInlineSpecWithSecret(vaWithInlineSpec(va(false, "", nil)) /*va*/, "emptySecret" /*secret*/), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "emptySecret"), @@ -492,7 +498,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with missing secrets -> error", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "unknownSecret"), node()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "unknownSecret"), 
csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "unknownSecret"), @@ -504,7 +510,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment updated -> PV finalizer is added", - initialObjects: []runtime.Object{pv(), node(), csiNode()}, + initialObjects: []runtime.Object{pv(), csiNode()}, updatedVA: va(false, "", nil), expectedActions: []core.Action{ // PV Finalizer after VA @@ -525,7 +531,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "error saving PV finalizer -> controller retries", - initialObjects: []runtime.Object{pv(), node(), csiNode()}, + initialObjects: []runtime.Object{pv(), csiNode()}, updatedVA: va(false, "", nil), reactors: []reaction{ { @@ -577,14 +583,14 @@ func TestCSIHandler(t *testing.T) { }, { name: "already attached volume -> ignored", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, updatedVA: va(true, fin, ann), expectedActions: []core.Action{}, expectedCSICalls: []csiCall{}, }, { name: "PV with deletion timestamp -> ignored with error", - initialObjects: []runtime.Object{pvDeleted(pv()), node()}, + initialObjects: []runtime.Object{pvDeleted(pv()), csiNode()}, updatedVA: va(false, fin, ann), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -595,7 +601,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment added -> successful attachment incl. metadata", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -611,13 +617,13 @@ func TestCSIHandler(t *testing.T) { }, { name: "unknown driver -> ignored", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: vaWithInvalidDriver(va(false, fin, ann)), expectedActions: []core.Action{}, }, { name: "unknown PV -> error", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: va(false, fin, ann), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -627,7 +633,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "unknown PV -> error + error saving the error", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: va(false, fin, ann), reactors: []reaction{ { @@ -661,7 +667,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "neither PV nor InlineVolumeSpec reference-> error", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: vaWithNoPVReferenceNorInlineVolumeSpec(va(false, fin, ann)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -671,7 +677,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "both PV and InlineVolumeSpec reference-> error", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: vaAddInlineSpec(va(false, fin, ann)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -691,7 +697,7 @@ func 
TestCSIHandler(t *testing.T) { }, { name: "failed write with VA finializers -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false, "", nil), reactors: []reaction{ { @@ -735,7 +741,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "failed write with attached=true -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false, "", nil), reactors: []reaction{ { @@ -779,7 +785,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSI attach fails -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -806,7 +812,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSI attach times out -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false, "", nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -832,9 +838,19 @@ func TestCSIHandler(t *testing.T) { vaWithAttachError(va(false, fin, ann), "csinode.storage.k8s.io \"node1\" not found"))), }, }, + { + name: "Node with annotations, CSINode is absent -> error", + initialObjects: []runtime.Object{pvWithFinalizer(), nodeWithAnnotations()}, + addedVA: va(false, fin, ann), + expectedActions: []core.Action{ + core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, + types.MergePatchType, patch(va(false /*attached*/, fin, ann), + vaWithAttachError(va(false, fin, ann), "csinode.storage.k8s.io \"node1\" not found"))), + }, + }, { name: "CSINode exists without the driver -> error", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNodeEmpty()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNodeEmpty()}, addedVA: va(false, fin, ann), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -844,7 +860,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSINode exists with the driver, Node without annotations -> success", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -861,7 +877,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with GCEPersistentDiskVolumeSource -> successful attachment", - initialObjects: []runtime.Object{gcePDPVWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{gcePDPVWithFinalizer(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil), expectedActions: []core.Action{ // Finalizer is saved first @@ -882,7 +898,7 @@ func TestCSIHandler(t *testing.T) { { name: "VolumeAttachment marked for deletion -> successful detach", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -895,7 +911,7 @@ func TestCSIHandler(t *testing.T) { 
}, { name: "VolumeAttachment with InlineVolumeSpec marked for deletion -> successful detach", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: deleted(vaWithInlineSpec(va(true, fin, ann))), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -908,7 +924,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "volume with secrets -> successful detach", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "secret"), node(), secret()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "secret"), secret()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "secret"), @@ -922,7 +938,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "volume attachment with InlineVolumeSpec and secrets -> successful detach", - initialObjects: []runtime.Object{node(), secret()}, + initialObjects: []runtime.Object{secret()}, addedVA: deleted(vaInlineSpecWithSecret(vaWithInlineSpec(va(true, fin, ann)), "secret")), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "secret"), @@ -936,7 +952,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "volume with empty secrets -> successful detach", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "emptySecret"), node(), emptySecret()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "emptySecret"), emptySecret()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "emptySecret"), @@ -950,7 +966,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "volume attachment with InlineVolumeSpec and empty secrets -> successful detach", - initialObjects: []runtime.Object{node(), emptySecret()}, + initialObjects: []runtime.Object{emptySecret()}, addedVA: deleted(vaInlineSpecWithSecret(vaWithInlineSpec(va(true, fin, ann)), "emptySecret")), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "emptySecret"), @@ -964,7 +980,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "volume with missing secrets -> error", - initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "unknownSecret"), node()}, + initialObjects: []runtime.Object{pvWithSecret(pvWithFinalizer(), "unknownSecret"), csiNode()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewGetAction(secretGroupResourceVersion, "default", "unknownSecret"), @@ -977,7 +993,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSI detach fails with an error -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -994,7 +1010,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "CSI detach times out -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -1008,14 +1024,14 @@ func TestCSIHandler(t *testing.T) { }, { name: "already detached volume -> ignored", - 
initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, updatedVA: deleted(va(false, "", nil)), expectedActions: []core.Action{}, expectedCSICalls: []csiCall{}, }, { name: "detach unknown PV -> error", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: deleted(va(true, fin, ann)), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -1025,7 +1041,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "detach unknown PV -> error + error saving the error", - initialObjects: []runtime.Object{node()}, + initialObjects: []runtime.Object{csiNode()}, addedVA: deleted(va(true, fin, ann)), reactors: []reaction{ { @@ -1059,7 +1075,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "detach VA with neither PV nor InlineCSIVolumeSource reference-> error", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: deleted(vaWithNoPVReferenceNorInlineVolumeSpec(va(true, fin, ann))), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -1070,7 +1086,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "detach VA with both PV and InlineCSIVolumeSource reference-> error", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: deleted(vaAddInlineSpec(va(true, fin, ann))), expectedActions: []core.Action{ core.NewPatchAction(vaGroupResourceVersion, metav1.NamespaceNone, testPVName+"-"+testNodeName, @@ -1105,7 +1121,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "failed write with attached=false -> controller retries", - initialObjects: []runtime.Object{pvWithFinalizer(), node()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: deleted(va(false, fin, ann)), reactors: []reaction{ { @@ -1148,7 +1164,7 @@ func TestCSIHandler(t *testing.T) { }, { name: "VolumeAttachment with GCEPersistentDiskVolumeSource marked for deletion -> successful detach", - initialObjects: []runtime.Object{gcePDPVWithFinalizer(), node()}, + initialObjects: []runtime.Object{gcePDPVWithFinalizer(), csiNode()}, addedVA: deleted(va(true /*attached*/, fin /*finalizer*/, ann)), expectedActions: []core.Action{ // Finalizer is saved first @@ -1332,7 +1348,7 @@ func TestCSIHandlerReadOnly(t *testing.T) { // { name: "read-write PV -> attached as read-write", - initialObjects: []runtime.Object{pvWithFinalizer(), node(), csiNode()}, + initialObjects: []runtime.Object{pvWithFinalizer(), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first @@ -1349,7 +1365,7 @@ func TestCSIHandlerReadOnly(t *testing.T) { }, { name: "read-only PV -> attached as read-write", - initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), node(), csiNode()}, + initialObjects: []runtime.Object{pvReadOnly(pvWithFinalizer()), csiNode()}, addedVA: va(false /*attached*/, "" /*finalizer*/, nil /* annotations */), expectedActions: []core.Action{ // Finalizer is saved first From 84f78b120e891b76b3e368cb0d20502baec026ea Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 10 Feb 2020 13:08:33 +0100 Subject: [PATCH 23/30] prow.sh: generic driver installation This relies on a slightly different deployment script: a "deploy.sh" must 
exist which knows that it has to dump a test driver configurion into the file pointed to with CSI_PROW_TEST_DRIVER, if that env variable is set. That way, we no longer need to know what capabilities the installed driver has. --- prow.sh | 104 +++++++++++++++++++++++--------------------------------- 1 file changed, 43 insertions(+), 61 deletions(-) diff --git a/prow.sh b/prow.sh index 118c5bd19..bb80741e2 100755 --- a/prow.sh +++ b/prow.sh @@ -157,7 +157,9 @@ csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr # the caller. configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory" -# The hostpath deployment script is searched for in several places. +# By default, this script tests sidecars with the CSI hostpath driver, +# using the install_csi_driver function. That function depends on +# a deployment script that it searches for in several places: # # - The "deploy" directory in the current repository: this is useful # for the situation that a component becomes incompatible with the @@ -165,11 +167,11 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # own example until the shared one can be updated; it's also how # csi-driver-host-path itself provides the example. # -# - CSI_PROW_HOSTPATH_VERSION of the CSI_PROW_HOSTPATH_REPO is checked +# - CSI_PROW_DRIVER_VERSION of the CSI_PROW_DRIVER_REPO is checked # out: this allows other repos to reference a version of the example # that is known to be compatible. # -# - The csi-driver-host-path/deploy directory has multiple sub-directories, +# - The /deploy directory can have multiple sub-directories, # each with different deployments (stable set of images for Kubernetes 1.13, # stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.). # This is necessary because there may be incompatible changes in the @@ -186,16 +188,26 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # "none" disables the deployment of the hostpath driver. # # When no deploy script is found (nothing in `deploy` directory, -# CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. -configvar CSI_PROW_HOSTPATH_VERSION "v1.3.0-rc3" "hostpath driver" -configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" +# CSI_PROW_DRIVER_REPO=none), nothing gets deployed. +# +# If the deployment script is called with CSI_PROW_TEST_DRIVER= as +# environment variable, then it must write a suitable test driver configuration +# into that file in addition to installing the driver. +configvar CSI_PROW_DRIVER_VERSION "v1.3.0-rc4" "CSI driver version" +configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo" configvar CSI_PROW_DEPLOYMENT "" "deployment" -configvar CSI_PROW_HOSTPATH_DRIVER_NAME "hostpath.csi.k8s.io" "the hostpath driver name" -# If CSI_PROW_HOSTPATH_CANARY is set (typically to "canary", but also -# "1.0-canary"), then all image versions are replaced with that -# version tag. -configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" +# The install_csi_driver function may work also for other CSI drivers, +# as long as they follow the conventions of the CSI hostpath driver. +# If they don't, then a different install function can be provided in +# a .prow.sh file and this config variable can be overridden. 
+configvar CSI_PROW_DRIVER_INSTALL "install_csi_driver" "name of the shell function which installs the CSI driver" + +# If CSI_PROW_DRIVER_CANARY is set (typically to "canary", but also +# version tag. Usually empty. CSI_PROW_HOSTPATH_CANARY is +# accepted as alternative name because some test-infra jobs +# still use that name. +configvar CSI_PROW_DRIVER_CANARY "${CSI_PROW_HOSTPATH_CANARY}" "driver image override for canary images" # The E2E testing can come from an arbitrary repo. The expectation is that # the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836) @@ -613,7 +625,7 @@ find_deployment () { # Fixed deployment name? Use it if it exists, otherwise fail. if [ "${CSI_PROW_DEPLOYMENT}" ]; then - file="$dir/${CSI_PROW_DEPLOYMENT}/deploy-hostpath.sh" + file="$dir/${CSI_PROW_DEPLOYMENT}/deploy.sh" if ! [ -e "$file" ]; then return 1 fi @@ -623,9 +635,9 @@ find_deployment () { # Ignore: See if you can use ${variable//search/replace} instead. # shellcheck disable=SC2001 - file="$dir/kubernetes-$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1.\2/')/deploy-hostpath.sh" + file="$dir/kubernetes-$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/\([0-9]*\)\.\([0-9]*\).*/\1.\2/')/deploy.sh" if ! [ -e "$file" ]; then - file="$dir/kubernetes-latest/deploy-hostpath.sh" + file="$dir/kubernetes-latest/deploy.sh" if ! [ -e "$file" ]; then return 1 fi @@ -633,12 +645,11 @@ find_deployment () { echo "$file" } -# This installs the hostpath driver example. CSI_PROW_HOSTPATH_CANARY overrides all -# image versions with that canary version. The parameters of install_hostpath can be -# used to override registry and/or tag of individual images (CSI_PROVISIONER_REGISTRY=localhost:9000 -# CSI_PROVISIONER_TAG=latest). -install_hostpath () { - local images deploy_hostpath +# This installs the CSI driver. It's called with a list of env variables +# that override the default images. CSI_PROW_DRIVER_CANARY overrides all +# image versions with that canary version. +install_csi_driver () { + local images deploy_driver images="$*" if [ "${CSI_PROW_DEPLOYMENT}" = "none" ]; then @@ -654,31 +665,31 @@ install_hostpath () { done fi - if deploy_hostpath="$(find_deployment "$(pwd)/deploy")"; then + if deploy_driver="$(find_deployment "$(pwd)/deploy")"; then : - elif [ "${CSI_PROW_HOSTPATH_REPO}" = "none" ]; then + elif [ "${CSI_PROW_DRIVER_REPO}" = "none" ]; then return 1 else - git_checkout "${CSI_PROW_HOSTPATH_REPO}" "${CSI_PROW_WORK}/hostpath" "${CSI_PROW_HOSTPATH_VERSION}" --depth=1 || die "checking out hostpath repo failed" - if deploy_hostpath="$(find_deployment "${CSI_PROW_WORK}/hostpath/deploy")"; then + git_checkout "${CSI_PROW_DRIVER_REPO}" "${CSI_PROW_WORK}/csi-driver" "${CSI_PROW_DRIVER_VERSION}" --depth=1 || die "checking out CSI driver repo failed" + if deploy_driver="$(find_deployment "${CSI_PROW_WORK}/csi-driver/deploy")"; then : else - die "deploy-hostpath.sh not found in ${CSI_PROW_HOSTPATH_REPO} ${CSI_PROW_HOSTPATH_VERSION}. To disable E2E testing, set CSI_PROW_HOSTPATH_REPO=none" + die "deploy.sh not found in ${CSI_PROW_DRIVER_REPO} ${CSI_PROW_DRIVER_VERSION}. To disable E2E testing, set CSI_PROW_DRIVER_REPO=none" fi fi - if [ "${CSI_PROW_HOSTPATH_CANARY}" != "stable" ]; then - images="$images IMAGE_TAG=${CSI_PROW_HOSTPATH_CANARY}" + if [ "${CSI_PROW_DRIVER_CANARY}" != "stable" ]; then + images="$images IMAGE_TAG=${CSI_PROW_DRIVER_CANARY}" fi # Ignore: Double quote to prevent globbing and word splitting. 
# It's intentional here for $images. # shellcheck disable=SC2086 - if ! run env $images "${deploy_hostpath}"; then + if ! run env "CSI_PROW_TEST_DRIVER=${CSI_PROW_WORK}/test-driver.yaml" $images "${deploy_driver}"; then # Collect information about failed deployment before failing. collect_cluster_info (start_loggers >/dev/null; wait) info "For container output see job artifacts." - die "deploying the hostpath driver with ${deploy_hostpath} failed" + die "deploying the CSI driver with ${deploy_driver} failed" fi } @@ -804,33 +815,6 @@ install_sanity () ( run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed" ) -# The default implementation of this function generates a external -# driver test configuration for the hostpath driver. -# -# The content depends on both what the E2E suite expects and what the -# installed hostpath driver supports. Generating it here seems prone -# to breakage, but it is uncertain where a better place might be. -generate_test_driver () { - cat <"${CSI_PROW_WORK}/test-driver.yaml" || die "generating test-driver.yaml failed" - # Rename, merge and filter JUnit files. Necessary in case that we run the E2E suite again # and to avoid the large number of "skipped" tests that we get from using # the full Kubernetes E2E testsuite while only running a few tests. @@ -1063,7 +1045,7 @@ main () { cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" # Get the image that was just built (if any) from the # top-level Makefile CMDS variable and set the - # deploy-hostpath.sh env variables for it. We also need to + # deploy.sh env variables for it. We also need to # side-load those images into the cluster. for i in $cmds; do e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) @@ -1101,7 +1083,7 @@ main () { fi # Installing the driver might be disabled. - if install_hostpath "$images"; then + if ${CSI_PROW_DRIVER_INSTALL} "$images"; then collect_cluster_info if sanity_enabled; then @@ -1158,7 +1140,7 @@ main () { fi # Installing the driver might be disabled. - if install_hostpath "$images"; then + if ${CSI_PROW_DRIVER_INSTALL} "$images"; then collect_cluster_info if tests_enabled "parallel-alpha"; then From 5f74333a466f0767cec812945c970c943a59cf7f Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 10 Feb 2020 11:06:31 +0100 Subject: [PATCH 24/30] prow.sh: also configure feature gates for kubelet That this hasn't been done before is an oversight. Apparently it hasn't been a problem because there never have been feature gates that mattered? 
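The kubelet patch added below fills its featureGates block via a list_gates helper that is not part of this change; as a rough, hypothetical sketch, such a helper only has to turn the comma-separated "$gates" string into indented YAML map entries:

    # Hypothetical sketch only; the real list_gates helper lives elsewhere in
    # prow.sh. It converts "SomeGateA=true,SomeGateB=false" into lines like
    # "  SomeGateA: true" so they can sit under "featureGates:" in the kind
    # cluster config (the indentation must match the generated YAML).
    list_gates_sketch () (
        IFS=','
        # shellcheck disable=SC2086
        set -- $1
        for gate in "$@"; do
            echo "  ${gate%%=*}: ${gate#*=}"
        done
    )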
--- prow.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/prow.sh b/prow.sh index 118c5bd19..6778e8b8e 100755 --- a/prow.sh +++ b/prow.sh @@ -568,6 +568,13 @@ kubeadmConfigPatches: nodeRegistration: kubeletExtraArgs: "feature-gates": "$gates" +- | + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + metadata: + name: config + featureGates: +$(list_gates "$gates") - | apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration From fdb32183fea99d452a2e110ef9ca6bfd6f3e9fa3 Mon Sep 17 00:00:00 2001 From: Jan Wozniak Date: Thu, 13 Feb 2020 11:21:45 +0100 Subject: [PATCH 25/30] Change 'make test-vet' to call 'go vet' --- build.make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.make b/build.make index a9b9d25dc..3bf3391c2 100644 --- a/build.make +++ b/build.make @@ -113,7 +113,7 @@ test-go: test: test-vet test-vet: @ echo; echo "### $@:" - go test $(GOFLAGS_VENDOR) `go list $(GOFLAGS_VENDOR) ./... | grep -v vendor $(TEST_VET_FILTER_CMD)` + go vet $(GOFLAGS_VENDOR) `go list $(GOFLAGS_VENDOR) ./... | grep -v vendor $(TEST_VET_FILTER_CMD)` .PHONY: test-fmt test: test-fmt From 7c5a89c8fcd634c4413837bc1042bd4bf27a93e6 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 14 Feb 2020 09:38:16 +0100 Subject: [PATCH 26/30] prow.sh: use 1.3.0 hostpath driver for testing The final 1.3.0 release of the hostpath driver really uses the 1.3.0 driver image in its deployment, in contrast to the previous -rc candidates which still used 1.2.0. --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 0331ac3f8..d54dea518 100755 --- a/prow.sh +++ b/prow.sh @@ -193,7 +193,7 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # If the deployment script is called with CSI_PROW_TEST_DRIVER= as # environment variable, then it must write a suitable test driver configuration # into that file in addition to installing the driver. 
-configvar CSI_PROW_DRIVER_VERSION "v1.3.0-rc4" "CSI driver version" +configvar CSI_PROW_DRIVER_VERSION "v1.3.0" "CSI driver version" configvar CSI_PROW_DRIVER_REPO https://github.com/kubernetes-csi/csi-driver-host-path "CSI driver repo" configvar CSI_PROW_DEPLOYMENT "" "deployment" From c31dc8d395d393d59bbe031ffdc9b7a1764b0a42 Mon Sep 17 00:00:00 2001 From: Rafael Franzke Date: Wed, 25 Mar 2020 06:37:01 +0100 Subject: [PATCH 27/30] Attacher reacts for deleted PVs if finalizer is present --- pkg/controller/controller.go | 9 +++------ pkg/controller/csi_handler_test.go | 20 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index a59a283cc..43d95aa20 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -20,12 +20,11 @@ import ( "fmt" "time" - "k8s.io/klog" - v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/api/equality" apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" coreinformers "k8s.io/client-go/informers/core/v1" storageinformers "k8s.io/client-go/informers/storage/v1beta1" @@ -37,6 +36,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" ) // CSIAttachController is a controller that attaches / detaches CSI volumes using provided Handler interface @@ -217,10 +217,7 @@ func (ctrl *CSIAttachController) syncVA() { } func (ctrl *CSIAttachController) processFinalizers(pv *v1.PersistentVolume) bool { - if pv.Spec.CSI != nil && pv.Spec.CSI.Driver == ctrl.attacherName { - return true - } - return false + return pv.DeletionTimestamp != nil && sets.NewString(pv.Finalizers...).Has(GetFinalizerName(ctrl.attacherName)) } // syncPV deals with one key off the queue. It returns false when it's time to quit. 
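A minimal sketch of the reworked processFinalizers check above; the finalizer string is illustrative, while the controller derives it via GetFinalizerName(ctrl.attacherName).

    package main

    import (
        "fmt"
        "time"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/sets"
    )

    // shouldProcess mirrors the new check: only PVs that are being deleted and
    // still carry this attacher's finalizer are put on the PV queue.
    func shouldProcess(pv *v1.PersistentVolume, finalizer string) bool {
        return pv.DeletionTimestamp != nil && sets.NewString(pv.Finalizers...).Has(finalizer)
    }

    func main() {
        now := metav1.NewTime(time.Now())
        pv := &v1.PersistentVolume{
            ObjectMeta: metav1.ObjectMeta{
                Name:              "pv1",
                DeletionTimestamp: &now,
                Finalizers:        []string{"external-attacher/example-driver"},
            },
        }
        fmt.Println(shouldProcess(pv, "external-attacher/example-driver")) // true: deleted PV still holds the finalizer
        pv.Finalizers = nil
        fmt.Println(shouldProcess(pv, "external-attacher/example-driver")) // false: finalizer already removed
    }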
diff --git a/pkg/controller/csi_handler_test.go b/pkg/controller/csi_handler_test.go index 8f2d06ee7..350b8d665 100644 --- a/pkg/controller/csi_handler_test.go +++ b/pkg/controller/csi_handler_test.go @@ -1197,6 +1197,26 @@ func TestCSIHandler(t *testing.T) { pvDeleted(pv()))), }, }, + { + name: "VA deleted -> PV finalizer removed (GCE PD PV)", + initialObjects: []runtime.Object{pvDeleted(gcePDPVWithFinalizer())}, + deletedVA: va(false, "", nil), + expectedActions: []core.Action{ + core.NewPatchAction(pvGroupResourceVersion, metav1.NamespaceNone, testPVName, + types.MergePatchType, patch(pvDeleted(gcePDPVWithFinalizer()), + pvDeleted(gcePDPV()))), + }, + }, + { + name: "VA deleted -> PV finalizer not removed", + initialObjects: []runtime.Object{pvDeleted(pv())}, + deletedVA: va(false, "", nil), + }, + { + name: "VA deleted -> PV finalizer not removed (GCE PD PV)", + initialObjects: []runtime.Object{pvDeleted(gcePDPV())}, + deletedVA: va(false, "", nil), + }, { name: "PV updated -> PV finalizer removed", initialObjects: []runtime.Object{}, From 9c18798d71512e3889809ea2372c56caff7e9ee3 Mon Sep 17 00:00:00 2001 From: windayski Date: Tue, 31 Mar 2020 09:44:00 +0800 Subject: [PATCH 28/30] fix typos --- pkg/controller/csi_handler.go | 2 +- pkg/controller/trivial_handler.go | 2 +- pkg/controller/util_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controller/csi_handler.go b/pkg/controller/csi_handler.go index c6c3a259f..3f2460847 100644 --- a/pkg/controller/csi_handler.go +++ b/pkg/controller/csi_handler.go @@ -185,7 +185,7 @@ func (h *csiHandler) ReconcileVA() error { // setForceSync sets the intention that next time the VolumeAttachment // referenced by vaName is processed on the VA queue that attach or detach will -// proceed even when the VA.Status.Attached mayalready show the desired state +// proceed even when the VA.Status.Attached may already show the desired state func (h *csiHandler) setForceSync(vaName string) { h.forceSyncMux.Lock() defer h.forceSyncMux.Unlock() diff --git a/pkg/controller/trivial_handler.go b/pkg/controller/trivial_handler.go index d5b4aba64..faed95f99 100644 --- a/pkg/controller/trivial_handler.go +++ b/pkg/controller/trivial_handler.go @@ -25,7 +25,7 @@ import ( ) // trivialHandler is a handler that marks all VolumeAttachments as attached. -// It's used for CSI drivers that don't support ControllerPulishVolume call. +// It's used for CSI drivers that don't support ControllerPublishVolume call. // It uses no finalizer, deletion of VolumeAttachment is instant (as there is // nothing to detach). 
type trivialHandler struct { diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index 321c836f8..66b1de474 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -106,7 +106,7 @@ func TestGetVolumeCapabilities(t *testing.T) { expectError: false, }, { - name: "RWX + anytyhing", + name: "RWX + anything", modes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadOnlyMany, v1.ReadWriteOnce}, expectedCapability: createMountCapability(defaultFSType, csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, nil), expectError: false, From 0fbdc209fc303eb234aa2737467557e8db9214c2 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Tue, 31 Mar 2020 10:03:30 +0200 Subject: [PATCH 29/30] Add CHANGELOG 2.2 --- CHANGELOG-2.2.md | 13 +++++++++++++ README.md | 5 ++--- 2 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 CHANGELOG-2.2.md diff --git a/CHANGELOG-2.2.md b/CHANGELOG-2.2.md new file mode 100644 index 000000000..9c9ac1a72 --- /dev/null +++ b/CHANGELOG-2.2.md @@ -0,0 +1,13 @@ +# Changelog since v2.1.0 + +### Bug Fixes + +- A bug that prevented the external-attacher from releasing its finalizer from a `PersistentVolume` object that was created using a legacy storage class provisioner and migrated to CSI has been fixed. ([#218](https://github.com/kubernetes-csi/external-attacher/pull/218), [@rfranzke](https://github.com/rfranzke)) +- Update package path to v2. Vendoring with dep depends on https://github.com/golang/dep/pull/1963 or the workaround described in v2/README.md. ([#209](https://github.com/kubernetes-csi/external-attacher/pull/209), [@alex1989hu](https://github.com/alex1989hu)) + + +### Other Notable Changes + +- Removed usage of annotation csi.volume.kubernetes.io/nodeid on Node objects. The external-attacher now requires Kubernetes 1.14 with feature gate CSINodeInfo enabled. ([#213](https://github.com/kubernetes-csi/external-attacher/pull/213), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) + + diff --git a/README.md b/README.md index e439626c8..9cf4a57ed 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ This information reflects the head of this branch. | Compatible with CSI Version | Container Image | Min K8s Version | Recommended K8s Version | | ------------------------------------------------------------------------------------------ | ----------------------------| --------------- | ----------------------- | -| [CSI Spec v1.2.0](https://github.com/container-storage-interface/spec/releases/tag/v1.2.0) | quay.io/k8scsi/csi-attacher | 1.15 | 1.17 | +| [CSI Spec v1.2.0](https://github.com/container-storage-interface/spec/releases/tag/v1.2.0) | quay.io/k8scsi/csi-attacher | 1.15 | 1.18 | ## Feature Status @@ -31,10 +31,9 @@ The following table reflects the head of this branch. | Feature | Status | Default | Description | | ------------- | ------- | ------- | --------------------------------------------------------------------------------------------- | -| CSINode* | Beta | On | external-attacher uses the CSINode object to get the driver's node name instead of the Node annotation. | | CSIMigration* | Beta | On | [Migrating in-tree volume plugins to CSI](https://kubernetes.io/docs/concepts/storage/volumes/#csi-migration). | -*) There are no special feature gates for these features. They are enabled by turning on the corresponding features in Kubernetes. +*) There is no special feature gate for this feature. It is enabled by turning on the corresponding features in Kubernetes. 
All other external-attacher features and the external-attacher itself is considered GA and fully supported. From 33c6f1c96ade916ddce2d3fabd5c58b05c81cbb8 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Wed, 1 Apr 2020 19:08:04 +0200 Subject: [PATCH 30/30] Fix Kubernetes version in README 1.15 looks like a typo, all the other docs use 1.14 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9cf4a57ed..7ca06e508 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ This information reflects the head of this branch. | Compatible with CSI Version | Container Image | Min K8s Version | Recommended K8s Version | | ------------------------------------------------------------------------------------------ | ----------------------------| --------------- | ----------------------- | -| [CSI Spec v1.2.0](https://github.com/container-storage-interface/spec/releases/tag/v1.2.0) | quay.io/k8scsi/csi-attacher | 1.15 | 1.18 | +| [CSI Spec v1.2.0](https://github.com/container-storage-interface/spec/releases/tag/v1.2.0) | quay.io/k8scsi/csi-attacher | 1.14 | 1.18 | ## Feature Status
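As the changelog entry above notes, the attacher now reads node IDs from CSINode objects rather than the csi.volume.kubernetes.io/nodeid Node annotation. An illustrative way to inspect what a driver registered on a node (the node name and output are examples):

    # List the CSI drivers registered on node "node1" together with their node IDs.
    kubectl get csinode node1 -o jsonpath='{range .spec.drivers[*]}{.name}{"\t"}{.nodeID}{"\n"}{end}'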