cephfs: use shallow volumes for the ROX accessMode
This commit makes shallow volumes the default for ROX volumes.

Signed-off-by: riya-singhal31 <rsinghal@redhat.com>
riya-singhal31 authored and mergify[bot] committed Feb 21, 2023
1 parent 8854c85 commit b28b5e6
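
In user-facing terms, a read-only PVC restored from a VolumeSnapshot is now provisioned as a shallow, snapshot-backed volume without setting `backingSnapshot` explicitly. A minimal sketch of such a claim using client-go types (the names "rox-clone", "csi-cephfs-sc", and "my-snap" are illustrative, not from the commit):

// Sketch: a ROX PVC restored from a snapshot. Under this change it is
// provisioned as a shallow (snapshot-backed) volume by default; the
// names "rox-clone", "csi-cephfs-sc", and "my-snap" are illustrative.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	scName := "csi-cephfs-sc"
	apiGroup := "snapshot.storage.k8s.io"

	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "rox-clone"},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
			// Shallow volumes are selected for read-only access modes only.
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			// Restoring from a snapshot is the second requirement.
			DataSource: &v1.TypedLocalObjectReference{
				APIGroup: &apiGroup,
				Kind:     "VolumeSnapshot",
				Name:     "my-snap",
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	fmt.Println(pvc.Name) // the claim would be created via client-go or kubectl
}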
Showing 7 changed files with 381 additions and 13 deletions.
2 changes: 1 addition & 1 deletion docs/deploy-cephfs.md
@@ -70,7 +70,7 @@ you're running it inside a k8s cluster and find the config itself).
| `pool` | no | Ceph pool into which volume data shall be stored |
| `volumeNamePrefix` | no | Prefix to use for naming subvolumes (defaults to `csi-vol-`). |
| `snapshotNamePrefix` | no | Prefix to use for naming snapshots (defaults to `csi-snap-`) |
- | `backingSnapshot` | no | Boolean value. The PVC shall be backed by the CephFS snapshot specified in its data source. `pool` parameter must not be specified. (defaults to `false`) |
+ | `backingSnapshot` | no | Boolean value. The PVC shall be backed by the CephFS snapshot specified in its data source. `pool` parameter must not be specified. (defaults to `true`) |
| `kernelMountOptions` | no | Comma separated string of mount options accepted by cephfs kernel mounter, by default no options are passed. Check man mount.ceph for options. |
| `fuseMountOptions` | no | Comma separated string of mount options accepted by ceph-fuse mounter, by default no options are passed. |
| `csi.storage.k8s.io/provisioner-secret-name`, `csi.storage.k8s.io/node-stage-secret-name` | for Kubernetes | Name of the Kubernetes Secret object containing Ceph client credentials. Both parameters should have the same value |
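Clusters that still want full-copy clones can override the new default per StorageClass. A sketch in the e2e suite's own style (`createCephfsStorageClass`, `f`, and `framework` are the suite helpers that appear in the tests below):

// Sketch: opt out of snapshot-backed (shallow) volumes for this class.
err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{
	"backingSnapshot": "false",
})
if err != nil {
	framework.Failf("failed to create CephFS storageclass: %v", err)
}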
132 changes: 128 additions & 4 deletions e2e/cephfs.go
@@ -1617,8 +1617,8 @@ var _ = Describe(cephfsType, func() {
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}

err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{
"backingSnapshot": "true",
"encrypted": "true",
"encryptionKMSID": kmsID,
})
@@ -1759,12 +1759,136 @@ var _ = Describe(cephfsType, func() {
framework.Failf("failed to get SHA512 sum for file: %v", err)
}

pvcClone, err := loadPVC(pvcClonePath)
if err != nil {
framework.Failf("failed to load PVC: %v", err)
}
// Snapshot-backed volumes support read-only access modes only.
pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
appClone, err := loadApp(appClonePath)
if err != nil {
framework.Failf("failed to load application: %v", err)
}
appCloneLabels := map[string]string{
appKey: appCloneLabel,
}
appClone.Labels = appCloneLabels
optAppClone := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, appCloneLabels[appKey]),
}
pvcClone.Namespace = f.UniqueName
appClone.Namespace = f.UniqueName
err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout)
if err != nil {
framework.Failf("failed to create PVC and app: %v", err)
}

// Snapshot-backed volume shouldn't contribute to total subvolume count.
validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)

// Deleting snapshot before deleting pvcClone should succeed. It will be
// deleted once all volumes that are backed by this snapshot are gone.
err = deleteSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to delete snapshot: %v", err)
}

appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"

snapFileSum, err := calculateSHA512sum(f, appClone, appCloneTestFilePath, &optAppClone)
if err != nil {
framework.Failf("failed to get SHA512 sum for file: %v", err)
}

if parentFileSum == snapFileSum {
framework.Failf("SHA512 sums of files in parent subvol and snapshot should differ")
}

err = deletePVCAndApp("", f, pvcClone, appClone)
if err != nil {
framework.Failf("failed to delete PVC or application: %v", err)
}

validateCephFSSnapshotCount(f, 0, subvolumegroup, pv)

err = deletePVCAndApp("", f, pvc, app)
if err != nil {
framework.Failf("failed to delete PVC or application: %v", err)
}

err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}

err = createCephfsStorageClass(f.ClientSet, f, false, nil)
if err != nil {
framework.Failf("failed to create CephFS storageclass: %v", err)
}
})

By("checking snapshot-backed volume by backing snapshot as false", func() {
pvc, err := loadPVC(pvcPath)
if err != nil {
framework.Failf("failed to load PVC: %v", err)
}
pvc.Namespace = f.UniqueName
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
framework.Failf("failed to create PVC: %v", err)
}

_, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace)
if err != nil {
framework.Failf("failed to get PV object for %s: %v", pvc.Name, err)
}

app, err := loadApp(appPath)
if err != nil {
framework.Failf("failed to load application: %v", err)
}
app.Namespace = f.UniqueName
app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
appLabels := map[string]string{
appKey: appLabel,
}
app.Labels = appLabels
optApp := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabels[appKey]),
}
err = writeDataInPod(app, &optApp, f)
if err != nil {
framework.Failf("failed to write data: %v", err)
}

appTestFilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"

snap := getSnapshot(snapshotPath)
snap.Namespace = f.UniqueName
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
err = createSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to create snapshot: %v", err)
}
validateCephFSSnapshotCount(f, 1, subvolumegroup, pv)

err = appendToFileInContainer(f, app, appTestFilePath, "hello", &optApp)
if err != nil {
framework.Failf("failed to append data: %v", err)
}

parentFileSum, err := calculateSHA512sum(f, app, appTestFilePath, &optApp)
if err != nil {
framework.Failf("failed to get SHA512 sum for file: %v", err)
}

err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}

err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{
"backingSnapshot": "true",
"backingSnapshot": "false",
})
if err != nil {
framework.Failf("failed to create CephFS storageclass: %v", err)
@@ -1794,15 +1918,15 @@ var _ = Describe(cephfsType, func() {
framework.Failf("failed to create PVC and app: %v", err)
}

- // Snapshot-backed volume shouldn't contribute to total subvolume count.
- validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup)
+ validateSubvolumeCount(f, 2, fileSystemName, subvolumegroup)

// Deleting snapshot before deleting pvcClone should succeed. It will be
// deleted once all volumes that are backed by this snapshot are gone.
err = deleteSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to delete snapshot: %v", err)
}
validateCephFSSnapshotCount(f, 0, subvolumegroup, pv)

appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"

4 changes: 2 additions & 2 deletions examples/cephfs/storageclass.yaml
@@ -49,8 +49,8 @@ parameters:

# (optional) Boolean value. The PVC shall be backed by the CephFS snapshot
# specified in its data source. `pool` parameter must not be specified.
- # (defaults to `false`)
- # backingSnapshot: "true"
+ # (defaults to `true`)
+ # backingSnapshot: "false"

# (optional) Instruct the plugin it has to encrypt the volume
# By default it is disabled. Valid values are "true" or "false".
9 changes: 3 additions & 6 deletions internal/cephfs/controllerserver.go
@@ -230,12 +230,9 @@ func checkValidCreateVolumeRequest(
case sID != nil:
if vol.BackingSnapshot {
volCaps := req.GetVolumeCapabilities()
- for _, volCap := range volCaps {
- mode := volCap.AccessMode.Mode
- if mode != csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY &&
- mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
- return errors.New("backingSnapshot may be used only with read-only access modes")
- }
+ isRO := store.IsVolumeCreateRO(volCaps)
+ if !isRO {
+ return errors.New("backingSnapshot may be used only with read-only access modes")
}
}
}
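
The refactor above replaces the per-capability loop with the shared `store.IsVolumeCreateRO` helper without changing behaviour. A quick illustration of the check (a sketch, not part of the commit; the internal import path assumes in-repo code):

// Sketch: store.IsVolumeCreateRO reports whether any requested
// capability carries a read-only access mode.
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/internal/cephfs/store"
	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	caps := []*csi.VolumeCapability{{
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
		},
	}}
	fmt.Println(store.IsVolumeCreateRO(caps)) // true: ROX is read-only

	caps[0].AccessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
	fmt.Println(store.IsVolumeCreateRO(caps)) // false: a writable mode
}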
23 changes: 23 additions & 0 deletions internal/cephfs/store/volumeoptions.go
@@ -235,6 +235,7 @@ func NewVolumeOptions(
opts.Monitors = strings.Join(clusterData.Monitors, ",")
opts.SubvolumeGroup = clusterData.CephFS.SubvolumeGroup
opts.Owner = k8s.GetOwner(volOptions)
opts.BackingSnapshot = IsShallowVolumeSupported(req)

if err = extractOptionalOption(&opts.Pool, "pool", volOptions); err != nil {
return nil, err
@@ -323,6 +324,28 @@ func NewVolumeOptions(
return &opts, nil
}

// IsShallowVolumeSupported returns true only for read-only volume requests
// with a snapshot data source.
func IsShallowVolumeSupported(req *csi.CreateVolumeRequest) bool {
isRO := IsVolumeCreateRO(req.VolumeCapabilities)

return isRO && (req.GetVolumeContentSource() != nil && req.GetVolumeContentSource().GetSnapshot() != nil)
}

// IsVolumeCreateRO returns true if any of the requested volume
// capabilities carries a read-only access mode.
func IsVolumeCreateRO(caps []*csi.VolumeCapability) bool {
for _, cap := range caps {
if cap.AccessMode != nil {
switch cap.AccessMode.Mode { //nolint:exhaustive // only check what we want
case csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY:
return true
}
}
}

return false
}
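
Putting the two helpers together, a request qualifies for a shallow volume only when it is read-only and its content source is a snapshot. A sketch of both outcomes (illustrative values; assumes in-repo code for the internal import):

// Sketch: IsShallowVolumeSupported requires both a read-only capability
// and a snapshot content source.
package main

import (
	"fmt"

	"github.com/ceph/ceph-csi/internal/cephfs/store"
	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	req := &csi.CreateVolumeRequest{
		Name: "rox-clone",
		VolumeCapabilities: []*csi.VolumeCapability{{
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
			},
		}},
		VolumeContentSource: &csi.VolumeContentSource{
			Type: &csi.VolumeContentSource_Snapshot{
				Snapshot: &csi.VolumeContentSource_SnapshotSource{SnapshotId: "snap-id"},
			},
		},
	}
	fmt.Println(store.IsShallowVolumeSupported(req)) // true: ROX + snapshot source

	req.VolumeContentSource = nil // e.g. a blank (non-restored) volume
	fmt.Println(store.IsShallowVolumeSupported(req)) // false: no snapshot source
}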

// newVolumeOptionsFromVolID generates a new instance of volumeOptions and VolumeIdentifier
// from the provided CSI VolumeID.
// nolint:gocyclo,cyclop // TODO: reduce complexity