diff --git a/docs/deploy-cephfs.md b/docs/deploy-cephfs.md
index d90fcec2d0e..8f2bca2a24b 100644
--- a/docs/deploy-cephfs.md
+++ b/docs/deploy-cephfs.md
@@ -70,7 +70,7 @@ you're running it inside a k8s cluster and find the config itself).
 | `pool` | no | Ceph pool into which volume data shall be stored |
 | `volumeNamePrefix` | no | Prefix to use for naming subvolumes (defaults to `csi-vol-`). |
 | `snapshotNamePrefix` | no | Prefix to use for naming snapshots (defaults to `csi-snap-`) |
-| `backingSnapshot` | no | Boolean value. The PVC shall be backed by the CephFS snapshot specified in its data source. `pool` parameter must not be specified. (defaults to `false`) |
+| `backingSnapshot` | no | Boolean value. The PVC shall be backed by the CephFS snapshot specified in its data source. `pool` parameter must not be specified. (defaults to `true`) |
 | `kernelMountOptions` | no | Comma separated string of mount options accepted by cephfs kernel mounter, by default no options are passed. Check man mount.ceph for options. |
 | `fuseMountOptions` | no | Comma separated string of mount options accepted by ceph-fuse mounter, by default no options are passed. |
 | `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | Name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value |
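
For orientation before the test and code changes below: with the new default, the provisioner decides shallow provisioning from the shape of the incoming CSI request. A minimal sketch (types from github.com/container-storage-interface/spec/lib/go/csi; the name and snapshot ID are hypothetical) of the only request shape that yields a snapshot-backed volume — a read-only access mode plus a snapshot data source:

	// Illustrative only, not part of this patch.
	req := &csi.CreateVolumeRequest{
		Name: "pvc-ro-from-snap", // hypothetical
		VolumeCapabilities: []*csi.VolumeCapability{{
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
			},
		}},
		VolumeContentSource: &csi.VolumeContentSource{
			Type: &csi.VolumeContentSource_Snapshot{
				Snapshot: &csi.VolumeContentSource_SnapshotSource{
					SnapshotId: "snap-id", // hypothetical
				},
			},
		},
	}

Any writable access mode, or a volume (clone) data source, keeps the regular full-clone path.
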
diff --git a/e2e/cephfs.go b/e2e/cephfs.go
index 8311b76e163..37a09b85fc1 100644
--- a/e2e/cephfs.go
+++ b/e2e/cephfs.go
@@ -1617,8 +1617,8 @@ var _ = Describe(cephfsType, func() {
 			if err != nil {
 				framework.Failf("failed to delete CephFS storageclass: %v", err)
 			}
+
 			err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{
-				"backingSnapshot": "true",
 				"encrypted":       "true",
 				"encryptionKMSID": kmsID,
 			})
@@ -1759,12 +1759,136 @@ var _ = Describe(cephfsType, func() {
 				framework.Failf("failed to get SHA512 sum for file: %v", err)
 			}
 
+			pvcClone, err := loadPVC(pvcClonePath)
+			if err != nil {
+				framework.Failf("failed to load PVC: %v", err)
+			}
+			// Snapshot-backed volumes support read-only access modes only.
+			pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
+			appClone, err := loadApp(appClonePath)
+			if err != nil {
+				framework.Failf("failed to load application: %v", err)
+			}
+			appCloneLabels := map[string]string{
+				appKey: appCloneLabel,
+			}
+			appClone.Labels = appCloneLabels
+			optAppClone := metav1.ListOptions{
+				LabelSelector: fmt.Sprintf("%s=%s", appKey, appCloneLabels[appKey]),
+			}
+			pvcClone.Namespace = f.UniqueName
+			appClone.Namespace = f.UniqueName
+			err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to create PVC and app: %v", err)
+			}
+
+			// Snapshot-backed volume shouldn't contribute to total subvolume count.
+			validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
+
+			// Deleting snapshot before deleting pvcClone should succeed. It will be
+			// deleted once all volumes that are backed by this snapshot are gone.
+			err = deleteSnapshot(&snap, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to delete snapshot: %v", err)
+			}
+
+			appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
+
+			snapFileSum, err := calculateSHA512sum(f, appClone, appCloneTestFilePath, &optAppClone)
+			if err != nil {
+				framework.Failf("failed to get SHA512 sum for file: %v", err)
+			}
+
+			if parentFileSum == snapFileSum {
+				framework.Failf("SHA512 sums of files in parent subvol and snapshot should differ")
+			}
+
+			err = deletePVCAndApp("", f, pvcClone, appClone)
+			if err != nil {
+				framework.Failf("failed to delete PVC or application: %v", err)
+			}
+
+			validateCephFSSnapshotCount(f, 0, subvolumegroup, pv)
+
+			err = deletePVCAndApp("", f, pvc, app)
+			if err != nil {
+				framework.Failf("failed to delete PVC or application: %v", err)
+			}
+
 			err = deleteResource(cephFSExamplePath + "storageclass.yaml")
 			if err != nil {
 				framework.Failf("failed to delete CephFS storageclass: %v", err)
 			}
+
+			err = createCephfsStorageClass(f.ClientSet, f, false, nil)
+			if err != nil {
+				framework.Failf("failed to create CephFS storageclass: %v", err)
+			}
+		})
+
+		By("checking snapshot-backed volume by backing snapshot as false", func() {
+			pvc, err := loadPVC(pvcPath)
+			if err != nil {
+				framework.Failf("failed to load PVC: %v", err)
+			}
+			pvc.Namespace = f.UniqueName
+			err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to create PVC: %v", err)
+			}
+
+			_, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
+			if err != nil {
+				framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
+			}
+
+			app, err := loadApp(appPath)
+			if err != nil {
+				framework.Failf("failed to load application: %v", err)
+			}
+			app.Namespace = f.UniqueName
+			app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
+			appLabels := map[string]string{
+				appKey: appLabel,
+			}
+			app.Labels = appLabels
+			optApp := metav1.ListOptions{
+				LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabels[appKey]),
+			}
+			err = writeDataInPod(app, &optApp, f)
+			if err != nil {
+				framework.Failf("failed to write data: %v", err)
+			}
+
+			appTestFilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
+
+			snap := getSnapshot(snapshotPath)
+			snap.Namespace = f.UniqueName
+			snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
+			err = createSnapshot(&snap, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to create snapshot: %v", err)
+			}
+			validateCephFSSnapshotCount(f, 1, subvolumegroup, pv)
+
+			err = appendToFileInContainer(f, app, appTestFilePath, "hello", &optApp)
+			if err != nil {
+				framework.Failf("failed to append data: %v", err)
+			}
+
+			parentFileSum, err := calculateSHA512sum(f, app, appTestFilePath, &optApp)
+			if err != nil {
+				framework.Failf("failed to get SHA512 sum for file: %v", err)
+			}
+
+			err = deleteResource(cephFSExamplePath + "storageclass.yaml")
+			if err != nil {
+				framework.Failf("failed to delete CephFS storageclass: %v", err)
+			}
 			err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{
-				"backingSnapshot": "true",
+				"backingSnapshot": "false",
 			})
 			if err != nil {
 				framework.Failf("failed to create CephFS storageclass: %v", err)
@@ -1794,8 +1918,7 @@ var _ = Describe(cephfsType, func() {
 				framework.Failf("failed to create PVC and app: %v", err)
 			}
 
-			// Snapshot-backed volume shouldn't contribute to total subvolume count.
-			validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
+			validateSubvolumeCount(f, 2, fileSystemName, subvolumegroup)
 
 			// Deleting snapshot before deleting pvcClone should succeed. It will be
 			// deleted once all volumes that are backed by this snapshot are gone.
@@ -1803,6 +1926,7 @@ var _ = Describe(cephfsType, func() {
 			if err != nil {
 				framework.Failf("failed to delete snapshot: %v", err)
 			}
+			validateCephFSSnapshotCount(f, 0, subvolumegroup, pv)
 
 			appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
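
The two e2e scenarios above differ only in the `backingSnapshot` storage-class parameter, and the asserted subvolume counts are the crux of the feature. A condensed restatement (illustrative sketch only; `expectedSubvolumeCount` is a hypothetical helper, not part of the e2e framework):

	// With backingSnapshot=true the clone is shallow: it mounts the CephFS
	// snapshot directly, so only the parent subvolume exists. With
	// backingSnapshot=false CephFS materializes a second subvolume for the clone.
	func expectedSubvolumeCount(backingSnapshot bool) int {
		if backingSnapshot {
			return 1 // parent subvolume only
		}
		return 2 // parent + cloned subvolume
	}

This is exactly what the `validateSubvolumeCount(f, 1, ...)` and `validateSubvolumeCount(f, 2, ...)` assertions in the two tests pin down.
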
diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml
index 4dab9ea2dd4..4c16bc2e777 100644
--- a/examples/cephfs/storageclass.yaml
+++ b/examples/cephfs/storageclass.yaml
@@ -49,8 +49,8 @@ parameters:
 
   # (optional) Boolean value. The PVC shall be backed by the CephFS snapshot
   # specified in its data source. `pool` parameter must not be specified.
-  # (defaults to `false`)
-  # backingSnapshot: "true"
+  # (defaults to `true`)
+  # backingSnapshot: "false"
 
   # (optional) Instruct the plugin it has to encrypt the volume
   # By default it is disabled. Valid values are "true" or "false".
diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go
index 0544d2585a6..96641690848 100644
--- a/internal/cephfs/controllerserver.go
+++ b/internal/cephfs/controllerserver.go
@@ -230,12 +230,9 @@ func checkValidCreateVolumeRequest(
 	case sID != nil:
 		if vol.BackingSnapshot {
 			volCaps := req.GetVolumeCapabilities()
-			for _, volCap := range volCaps {
-				mode := volCap.AccessMode.Mode
-				if mode != csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY &&
-					mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
-					return errors.New("backingSnapshot may be used only with read-only access modes")
-				}
+			isRO := store.IsVolumeCreateRO(volCaps)
+			if !isRO {
+				return errors.New("backingSnapshot may be used only with read-only access modes")
 			}
 		}
 	}
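
The hunk above swaps an inline loop for the new `store.IsVolumeCreateRO` helper (introduced in the next file). A minimal sketch of the resulting behavior, using the same `csi` and `store` packages this file already imports:

	// A writable capability on a snapshot-backed volume is rejected up front.
	caps := []*csi.VolumeCapability{{
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		},
	}}
	if !store.IsVolumeCreateRO(caps) {
		// CreateVolume fails with:
		// "backingSnapshot may be used only with read-only access modes"
	}

Note one subtle shift: the removed loop errored out if any capability was writable, whereas `IsVolumeCreateRO` succeeds as soon as it finds one read-only capability, so the two are not equivalent for mixed capability lists.
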
diff --git a/internal/cephfs/store/volumeoptions.go b/internal/cephfs/store/volumeoptions.go
index 24e96655cf7..8ff10cbf5a5 100644
--- a/internal/cephfs/store/volumeoptions.go
+++ b/internal/cephfs/store/volumeoptions.go
@@ -235,6 +235,7 @@ func NewVolumeOptions(
 	opts.Monitors = strings.Join(clusterData.Monitors, ",")
 	opts.SubvolumeGroup = clusterData.CephFS.SubvolumeGroup
 	opts.Owner = k8s.GetOwner(volOptions)
+	opts.BackingSnapshot = IsShallowVolumeSupported(req)
 
 	if err = extractOptionalOption(&opts.Pool, "pool", volOptions); err != nil {
 		return nil, err
@@ -323,6 +324,30 @@ func NewVolumeOptions(
 	return &opts, nil
 }
 
+// IsShallowVolumeSupported returns true only for ReadOnly volume requests
+// with datasource as snapshot.
+func IsShallowVolumeSupported(req *csi.CreateVolumeRequest) bool {
+	isRO := IsVolumeCreateRO(req.VolumeCapabilities)
+
+	return isRO && (req.GetVolumeContentSource() != nil && req.GetVolumeContentSource().GetSnapshot() != nil)
+}
+
+// IsVolumeCreateRO returns true if any of the requested volume capabilities
+// carries a read-only access mode.
+func IsVolumeCreateRO(caps []*csi.VolumeCapability) bool {
+	for _, cap := range caps {
+		if cap.AccessMode != nil {
+			switch cap.AccessMode.Mode { //nolint:exhaustive // only check what we want
+			case csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
+				csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
 // newVolumeOptionsFromVolID generates a new instance of volumeOptions and VolumeIdentifier
 // from the provided CSI VolumeID.
 //nolint:gocyclo,cyclop // TODO: reduce complexity
diff --git a/internal/cephfs/store/volumeoptions_test.go b/internal/cephfs/store/volumeoptions_test.go
new file mode 100644
index 00000000000..fb08ed3d6f8
--- /dev/null
+++ b/internal/cephfs/store/volumeoptions_test.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2023 The Ceph-CSI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package store
+
+import (
+	"testing"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+)
+
+func TestIsVolumeCreateRO(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name string
+		caps []*csi.VolumeCapability
+		isRO bool
+	}{
+		{
+			name: "multi-node reader-only access mode",
+			caps: []*csi.VolumeCapability{
+				{
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
+					},
+				},
+			},
+			isRO: true,
+		},
+		{
+			name: "single-node multi-writer access mode",
+			caps: []*csi.VolumeCapability{
+				{
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
+					},
+				},
+			},
+			isRO: false,
+		},
+		{
+			name: "single-node reader-only access mode",
+			caps: []*csi.VolumeCapability{
+				{
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
+					},
+				},
+			},
+			isRO: true,
+		},
+		{
+			name: "multi-node multi-writer access mode",
+			caps: []*csi.VolumeCapability{
+				{
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+					},
+				},
+			},
+			isRO: false,
+		},
+		{
+			name: "single-node single-writer access mode",
+			caps: []*csi.VolumeCapability{
+				{
+					AccessMode: &csi.VolumeCapability_AccessMode{
+						Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER,
+					},
+				},
+			},
+			isRO: false,
+		},
+	}
+	for _, tt := range tests {
+		newtt := tt
+		t.Run(newtt.name, func(t *testing.T) {
+			t.Parallel()
+			got := IsVolumeCreateRO(newtt.caps)
+			if got != newtt.isRO {
+				t.Errorf("IsVolumeCreateRO() = %v, want %v", got, newtt.isRO)
+			}
+		})
+	}
+}
+
+func TestIsShallowVolumeSupported(t *testing.T) {
+	t.Parallel()
+	type args struct {
+		req *csi.CreateVolumeRequest
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "writable access mode with volume source",
+			args: args{
+				req: &csi.CreateVolumeRequest{
+					Name: "",
+					VolumeCapabilities: []*csi.VolumeCapability{
+						{
+							AccessType: &csi.VolumeCapability_Block{},
+							AccessMode: &csi.VolumeCapability_AccessMode{
+								Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER,
+							},
+						},
+					},
+					VolumeContentSource: &csi.VolumeContentSource{
+						Type: &csi.VolumeContentSource_Volume{
+							Volume: &csi.VolumeContentSource_VolumeSource{
+								VolumeId: "vol",
+							},
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "read-only access mode with volume source",
+			args: args{
+				req: &csi.CreateVolumeRequest{
+					Name: "",
+					VolumeCapabilities: []*csi.VolumeCapability{
+						{
+							AccessType: &csi.VolumeCapability_Block{},
+							AccessMode: &csi.VolumeCapability_AccessMode{
+								Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
+							},
+						},
+					},
+					VolumeContentSource: &csi.VolumeContentSource{
+						Type: &csi.VolumeContentSource_Volume{
+							Volume: &csi.VolumeContentSource_VolumeSource{
+								VolumeId: "vol",
+							},
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "writable access mode with snapshot source",
+			args: args{
+				req: &csi.CreateVolumeRequest{
+					Name: "",
+					VolumeCapabilities: []*csi.VolumeCapability{
+						{
+							AccessType: &csi.VolumeCapability_Block{},
+							AccessMode: &csi.VolumeCapability_AccessMode{
+								Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+							},
+						},
+					},
+					VolumeContentSource: &csi.VolumeContentSource{
+						Type: &csi.VolumeContentSource_Snapshot{
+							Snapshot: &csi.VolumeContentSource_SnapshotSource{
+								SnapshotId: "snap",
+							},
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "read-only access mode with snapshot source",
+			args: args{
+				req: &csi.CreateVolumeRequest{
+					Name: "",
+					VolumeCapabilities: []*csi.VolumeCapability{
+						{
+							AccessType: &csi.VolumeCapability_Block{},
+							AccessMode: &csi.VolumeCapability_AccessMode{
+								Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
+							},
+						},
+					},
+					VolumeContentSource: &csi.VolumeContentSource{
+						Type: &csi.VolumeContentSource_Snapshot{
+							Snapshot: &csi.VolumeContentSource_SnapshotSource{
+								SnapshotId: "snap",
+							},
+						},
+					},
+				},
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		newtt := tt
+		t.Run(newtt.name, func(t *testing.T) {
+			t.Parallel()
+			t.Log(newtt.args.req.GetVolumeContentSource().GetSnapshot())
+			t.Log(IsVolumeCreateRO(newtt.args.req.GetVolumeCapabilities()))
+			if got := IsShallowVolumeSupported(newtt.args.req); got != newtt.want {
+				t.Errorf("IsShallowVolumeSupported() = %v, want %v", got, newtt.want)
+			}
+		})
+	}
+}
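
Taken together with the `NewVolumeOptions` hunk above, these tests pin down the effective rule: the shallow decision is derived from the request itself rather than from the StorageClass parameter alone. A one-line restatement (illustrative; `req` as in the read-only-with-snapshot case above; the protobuf getters are nil-safe):

	// BackingSnapshot is seeded from the request shape: read-only
	// capabilities AND a snapshot content source; anything else falls
	// back to a full clone.
	shallow := store.IsVolumeCreateRO(req.GetVolumeCapabilities()) &&
		req.GetVolumeContentSource().GetSnapshot() != nil
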
diff --git a/internal/nfs/controller/controllerserver.go b/internal/nfs/controller/controllerserver.go
index 0b3b8c61db9..36a55a9bd96 100644
--- a/internal/nfs/controller/controllerserver.go
+++ b/internal/nfs/controller/controllerserver.go
@@ -77,6 +77,8 @@ func (cs *Server) CreateVolume(
 	ctx context.Context,
 	req *csi.CreateVolumeRequest,
 ) (*csi.CreateVolumeResponse, error) {
+	// NFS does not support shallow, snapshot-backed volumes.
+	req.Parameters["backingSnapshot"] = "false"
 	res, err := cs.backendServer.CreateVolume(ctx, req)
 	if err != nil {
 		return nil, err
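
One caveat with the override above, as an observation rather than part of the patch: `req.Parameters` is a plain map and may in principle arrive nil when the StorageClass carries no parameters, and assigning into a nil map panics in Go. A defensive variant would be:

	// Hypothetical hardening of the override in (*Server).CreateVolume.
	if req.Parameters == nil {
		req.Parameters = map[string]string{}
	}
	// NFS volumes are always full copies; force the CephFS backend off the
	// shallow, snapshot-backed path.
	req.Parameters["backingSnapshot"] = "false"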