diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml index 71bc300ec3d..0d805771c1c 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml @@ -117,6 +117,12 @@ spec: fieldPath: spec.nodeName - name: CSI_ENDPOINT value: unix:///csi/csi-provisioner.sock + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # - name: KMS_CONFIGMAP_NAME + # value: encryptionConfig imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir @@ -134,6 +140,8 @@ spec: mountPath: /etc/ceph-csi-config/ - name: keys-tmp-dir mountPath: /tmp/csi/keys + - name: ceph-csi-encryption-kms-config + mountPath: /etc/ceph-csi-encryption-kms-config/ - name: liveness-prometheus image: quay.io/cephcsi/cephcsi:canary args: @@ -178,3 +186,6 @@ spec: emptyDir: { medium: "Memory" } + - name: ceph-csi-encryption-kms-config + configMap: + name: ceph-csi-encryption-kms-config diff --git a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml index ce8c2b29dc1..977409d2dce 100644 --- a/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml +++ b/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml @@ -74,6 +74,12 @@ spec: fieldPath: spec.nodeName - name: CSI_ENDPOINT value: unix:///csi/csi.sock + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # - name: KMS_CONFIGMAP_NAME + # value: encryptionConfig imagePullPolicy: "IfNotPresent" volumeMounts: - name: socket-dir @@ -104,6 +110,8 @@ spec: mountPath: /tmp/csi/keys - name: ceph-csi-mountinfo mountPath: /csi/mountinfo + - name: ceph-csi-encryption-kms-config + mountPath: /etc/ceph-csi-encryption-kms-config/ - name: liveness-prometheus securityContext: privileged: true @@ -173,6 +181,9 @@ spec: hostPath: path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo type: DirectoryOrCreate + - name: ceph-csi-encryption-kms-config + configMap: + name: ceph-csi-encryption-kms-config --- # This is a service to expose the liveness metrics apiVersion: v1 diff --git a/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml b/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml index a1ee7d1a04d..c1833d044da 100644 --- a/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml @@ -3,3 +3,36 @@ apiVersion: v1 kind: ServiceAccount metadata: name: cephfs-csi-nodeplugin + namespace: default +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts/token"] + verbs: ["create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: cephfs-csi-nodeplugin + # replace with non-default namespace name + namespace: default +roleRef: + kind: ClusterRole + name: cephfs-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml index 93794b236a3..945e9560579 100644 --- a/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml +++ b/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml @@ -3,6 +3,7 @@ apiVersion: v1 kind: 
ServiceAccount metadata: name: cephfs-csi-provisioner + namespace: default --- kind: ClusterRole @@ -10,9 +11,12 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: cephfs-external-provisioner-runner rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["secrets"] - verbs: ["get", "list"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] @@ -22,6 +26,9 @@ rules: - apiGroups: [""] resources: ["persistentvolumeclaims"] verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] @@ -37,15 +44,21 @@ rules: - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims/status"] - verbs: ["update", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get"] + - apiGroups: [""] + resources: ["serviceaccounts/token"] + verbs: ["create"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 @@ -68,6 +81,9 @@ metadata: namespace: default name: cephfs-external-provisioner-cfg rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] diff --git a/e2e/cephfs.go b/e2e/cephfs.go index a058eeab9d7..c09f96b477d 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -203,6 +203,7 @@ var _ = Describe(cephfsType, func() { if err != nil { e2elog.Failf("failed to create node secret: %v", err) } + deployVault(f.ClientSet, deployTimeout) // wait for cluster name update in deployment containers := []string{cephFSContainerName} @@ -248,6 +249,8 @@ var _ = Describe(cephfsType, func() { if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) } + deleteVault() + if deployCephFS { deleteCephfsPlugin() if cephCSINamespace != defaultNs { @@ -414,6 +417,67 @@ var _ = Describe(cephfsType, func() { } }) + if testCephFSFscrypt { + kmsToTest := map[string]kmsConfig{ + "secrets-metadata-test": secretsMetadataKMS, + "vault-test": vaultKMS, + "vault-tokens-test": vaultTokensKMS, + "vault-tenant-sa-test": vaultTenantSAKMS, + } + + for kmsID, kmsConf := range kmsToTest { + kmsID := kmsID + kmsConf := kmsConf + By("create a storageclass with pool and an encrypted PVC then bind it to an app with "+kmsID, func() { + scOpts := map[string]string{ + "encrypted": "true", + "encryptionKMSID": kmsID, + } + err := createCephfsStorageClass(f.ClientSet, f, true, scOpts) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + + if kmsID == "vault-tokens-test" { + var token v1.Secret + tenant := f.UniqueName + token, err = getSecret(vaultExamplePath + "tenant-token.yaml") + if err != nil { + e2elog.Failf("failed to load tenant token from secret: %v", err) + } + _, err = c.CoreV1().Secrets(tenant).Create(context.TODO(), &token, metav1.CreateOptions{}) + if err != nil { + e2elog.Failf("failed to create Secret with tenant token: %v", err) + } + 
defer func() { + err = c.CoreV1().Secrets(tenant).Delete(context.TODO(), token.Name, metav1.DeleteOptions{}) + if err != nil { + e2elog.Failf("failed to delete Secret with tenant token: %v", err) + } + }() + + } + if kmsID == "vault-tenant-sa-test" { + err = createTenantServiceAccount(f.ClientSet, f.UniqueName) + if err != nil { + e2elog.Failf("failed to create ServiceAccount: %v", err) + } + defer deleteTenantServiceAccount(f.UniqueName) + } + + err = validateFscryptAndAppBinding(pvcPath, appPath, kmsConf, f) + if err != nil { + e2elog.Failf("failed to validate CephFS pvc and application binding: %v", err) + } + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass: %v", err) + } + }) + } + } + By("create a PVC and check PVC/PV metadata on CephFS subvolume", func() { err := createCephfsStorageClass(f.ClientSet, f, true, nil) if err != nil { @@ -1477,6 +1541,164 @@ var _ = Describe(cephfsType, func() { } }) + if testCephFSFscrypt { + for _, kmsID := range []string{"secrets-metadata-test", "vault-test"} { + kmsID := kmsID + By("checking encrypted snapshot-backed volume with KMS "+kmsID, func() { + err := deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + + scOpts := map[string]string{ + "encrypted": "true", + "encryptionKMSID": kmsID, + } + + err = createCephfsStorageClass(f.ClientSet, f, true, scOpts) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + + err = createCephFSSnapshotClass(f) + if err != nil { + e2elog.Failf("failed to create CephFS snapshotclass: %v", err) + } + + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC: %v", err) + } + pvc.Namespace = f.UniqueName + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC: %v", err) + } + + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application: %v", err) + } + app.Namespace = f.UniqueName + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + appLabels := map[string]string{ + appKey: appLabel, + } + app.Labels = appLabels + optApp := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", appKey, appLabels[appKey]), + } + err = writeDataInPod(app, &optApp, f) + if err != nil { + e2elog.Failf("failed to write data: %v", err) + } + + appTestFilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + + snap := getSnapshot(snapshotPath) + snap.Namespace = f.UniqueName + snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name + err = createSnapshot(&snap, deployTimeout) + if err != nil { + e2elog.Failf("failed to create snapshot: %v", err) + } + + err = appendToFileInContainer(f, app, appTestFilePath, "hello", &optApp) + if err != nil { + e2elog.Failf("failed to append data: %v", err) + } + + parentFileSum, err := calculateSHA512sum(f, app, appTestFilePath, &optApp) + if err != nil { + e2elog.Failf("failed to get SHA512 sum for file: %v", err) + } + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass: %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, false, map[string]string{ + "backingSnapshot": "true", + "encrypted": "true", + "encryptionKMSID": kmsID, + }) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + + pvcClone, err := loadPVC(pvcClonePath) + if 
err != nil { + e2elog.Failf("failed to load PVC: %v", err) + } + // Snapshot-backed volumes support read-only access modes only. + pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} + appClone, err := loadApp(appClonePath) + if err != nil { + e2elog.Failf("failed to load application: %v", err) + } + appCloneLabels := map[string]string{ + appKey: appCloneLabel, + } + appClone.Labels = appCloneLabels + optAppClone := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", appKey, appCloneLabels[appKey]), + } + pvcClone.Namespace = f.UniqueName + appClone.Namespace = f.UniqueName + err = createPVCAndApp("", f, pvcClone, appClone, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC and app: %v", err) + } + + // Snapshot-backed volume shouldn't contribute to total subvolume count. + validateSubvolumeCount(f, 1, fileSystemName, subvolumegroup) + + // Deleting snapshot before deleting pvcClone should succeed. It will be + // deleted once all volumes that are backed by this snapshot are gone. + err = deleteSnapshot(&snap, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete snapshot: %v", err) + } + + appCloneTestFilePath := appClone.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + + snapFileSum, err := calculateSHA512sum(f, appClone, appCloneTestFilePath, &optAppClone) + if err != nil { + e2elog.Failf("failed to get SHA512 sum for file: %v", err) + } + + if parentFileSum == snapFileSum { + e2elog.Failf("SHA512 sums of files in parent subvol and snapshot should differ") + } + + err = deletePVCAndApp("", f, pvcClone, appClone) + if err != nil { + e2elog.Failf("failed to delete PVC or application: %v", err) + } + + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + e2elog.Failf("failed to delete PVC or application: %v", err) + } + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass: %v", err) + } + + err = deleteResource(cephFSExamplePath + "snapshotclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS snapshotclass: %v", err) + } + + err = createCephfsStorageClass(f.ClientSet, f, false, nil) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + }) + } + } + By("checking snapshot-backed volume", func() { err := createCephFSSnapshotClass(f) if err != nil { @@ -1616,6 +1838,44 @@ var _ = Describe(cephfsType, func() { } }) + if testCephFSFscrypt { + kmsToTest := map[string]kmsConfig{ + "secrets-metadata-test": secretsMetadataKMS, + "vault-test": vaultKMS, + } + for kmsID, kmsConf := range kmsToTest { + kmsID := kmsID + kmsConf := kmsConf + By("create an encrypted PVC-PVC clone and bind it to an app with "+kmsID, func() { + err := deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + + scOpts := map[string]string{ + "encrypted": "true", + "encryptionKMSID": kmsID, + } + + err = createCephfsStorageClass(f.ClientSet, f, true, scOpts) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + + validateFscryptClone(pvcPath, appPath, pvcSmartClonePath, appSmartClonePath, kmsConf, f) + + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass: %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, false, nil) + if err != nil { + e2elog.Failf("failed to create CephFS storageclass: %v", err) + } + }) + } + } + 
By("create a PVC-PVC clone and bind it to an app", func() { var wg sync.WaitGroup totalCount := 3 diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index dd0a58124ff..23a1026344a 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -373,3 +373,198 @@ func deleteBackingCephFSSubvolumeSnapshot( return nil } + +func validateEncryptedCephfs(f *framework.Framework, pvName, appName string) error { + pod, err := f.ClientSet.CoreV1().Pods(f.UniqueName).Get(context.TODO(), appName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get pod %q in namespace %q: %w", appName, f.UniqueName, err) + } + volumeMountPath := fmt.Sprintf( + "/var/lib/kubelet/pods/%s/volumes/kubernetes.io~csi/%s/mount", + pod.UID, + pvName) + + selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, cephFSDeamonSetName) + if err != nil { + return fmt.Errorf("failed to get labels: %w", err) + } + opt := metav1.ListOptions{ + LabelSelector: selector, + } + + cmd := fmt.Sprintf("getfattr --name=ceph.fscrypt.auth --only-values %s", volumeMountPath) + _, _, err = execCommandInContainer(f, cmd, cephCSINamespace, "csi-cephfsplugin", &opt) + if err != nil { + cmd = fmt.Sprintf("getfattr --recursive --dump %s", volumeMountPath) + stdOut, stdErr, listErr := execCommandInContainer(f, cmd, cephCSINamespace, "csi-cephfsplugin", &opt) + if listErr == nil { + return fmt.Errorf("error checking for cephfs fscrypt xattr on %q. listing: %s %s", + volumeMountPath, stdOut, stdErr) + } + + return fmt.Errorf("error checking file xattr: %w", err) + } + + return nil +} + +func getInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (string, string, error) { + c := f.ClientSet.CoreV1() + pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + if err != nil { + return "", "", fmt.Errorf("failed to get pvc: %w", err) + } + + pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + return "", "", fmt.Errorf("failed to get pv: %w", err) + } + + return pv.Name, pv.Spec.CSI.VolumeHandle, nil +} + +func validateFscryptAndAppBinding(pvcPath, appPath string, kms kmsConfig, f *framework.Framework) error { + pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) + if err != nil { + return err + } + + pvName, csiVolumeHandle, err := getInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + err = validateEncryptedCephfs(f, pvName, app.Name) + if err != nil { + return err + } + + if kms != noKMS && kms.canGetPassphrase() { + // check new passphrase created + _, stdErr := kms.getPassphrase(f, csiVolumeHandle) + if stdErr != "" { + return fmt.Errorf("failed to read passphrase from vault: %s", stdErr) + } + } + + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + return err + } + + if kms != noKMS && kms.canGetPassphrase() { + // check new passphrase created + stdOut, _ := kms.getPassphrase(f, csiVolumeHandle) + if stdOut != "" { + return fmt.Errorf("passphrase found in vault while should be deleted: %s", stdOut) + } + } + + if kms != noKMS && kms.canVerifyKeyDestroyed() { + destroyed, msg := kms.verifyKeyDestroyed(f, csiVolumeHandle) + if !destroyed { + return fmt.Errorf("passphrased was not destroyed: %s", msg) + } else if msg != "" { + e2elog.Logf("passphrase destroyed, but message returned: %s", msg) + } + } + + return nil +} + +//nolint:gocyclo,cyclop // test function +func validateFscryptClone( + pvcPath, appPath, pvcSmartClonePath, appSmartClonePath 
string, + kms kmsConfig, + f *framework.Framework, +) { + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC: %v", err) + } + + pvc.Namespace = f.UniqueName + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC: %v", err) + } + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application: %v", err) + } + label := make(map[string]string) + label[appKey] = appLabel + app.Namespace = f.UniqueName + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + app.Labels = label + opt := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", appKey, label[appKey]), + } + wErr := writeDataInPod(app, &opt, f) + if wErr != nil { + e2elog.Failf("failed to write data from application: %v", wErr) + } + + pvcClone, err := loadPVC(pvcSmartClonePath) + if err != nil { + e2elog.Failf("failed to load PVC: %v", err) + } + pvcClone.Spec.DataSource.Name = pvc.Name + pvcClone.Namespace = f.UniqueName + appClone, err := loadApp(appSmartClonePath) + if err != nil { + e2elog.Failf("failed to load application: %v", err) + } + appClone.Namespace = f.UniqueName + appClone.Labels = map[string]string{ + appKey: f.UniqueName, + } + + err = createPVCAndApp(f.UniqueName, f, pvcClone, appClone, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC or application (%s): %v", f.UniqueName, err) + } + + _, csiVolumeHandle, err := getInfoFromPVC(pvcClone.Namespace, pvcClone.Name, f) + if err != nil { + e2elog.Failf("failed to get pvc info: %s", err) + } + + if kms != noKMS && kms.canGetPassphrase() { + // check new passphrase created + stdOut, stdErr := kms.getPassphrase(f, csiVolumeHandle) + if stdOut != "" { + e2elog.Logf("successfully read the passphrase from vault: %s", stdOut) + } + if stdErr != "" { + e2elog.Failf("failed to read passphrase from vault: %s", stdErr) + } + } + + // delete parent pvc + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + e2elog.Failf("failed to delete PVC or application: %v", err) + } + + err = deletePVCAndApp(f.UniqueName, f, pvcClone, appClone) + if err != nil { + e2elog.Failf("failed to delete PVC or application (%s): %v", f.UniqueName, err) + } + + if kms != noKMS && kms.canGetPassphrase() { + // check passphrase deleted + stdOut, _ := kms.getPassphrase(f, csiVolumeHandle) + if stdOut != "" { + e2elog.Failf("passphrase found in vault while it should have been deleted: %s", stdOut) + } + } + + if kms != noKMS && kms.canVerifyKeyDestroyed() { + destroyed, msg := kms.verifyKeyDestroyed(f, csiVolumeHandle) + if !destroyed { + e2elog.Failf("passphrase was not destroyed: %s", msg) + } else if msg != "" { + e2elog.Logf("passphrase destroyed, but message returned: %s", msg) + } + } +} diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 40745d8b5ad..e4538788aeb 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -38,6 +38,7 @@ func init() { flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver") flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver") flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver") + flag.BoolVar(&testCephFSFscrypt, "test-cephfs-fscrypt", false, "test CephFS csi driver fscrypt support") flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver") flag.BoolVar(&testRBDFSCrypt, "test-rbd-fscrypt", false, "test rbd csi driver fscrypt support") flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter") diff --git a/e2e/upgrade-cephfs.go 
b/e2e/upgrade-cephfs.go index 6eb233972d2..83a19564cfc 100644 --- a/e2e/upgrade-cephfs.go +++ b/e2e/upgrade-cephfs.go @@ -74,6 +74,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { if err != nil { e2elog.Failf("failed to getwd: %v", err) } + deployVault(f.ClientSet, deployTimeout) err = upgradeAndDeployCSI(upgradeVersion, "cephfs") if err != nil { e2elog.Failf("failed to upgrade csi: %v", err) @@ -150,6 +151,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { if err != nil { e2elog.Failf("failed to delete storageclass: %v", err) } + deleteVault() if deployCephFS { deleteCephfsPlugin() if cephCSINamespace != defaultNs { diff --git a/e2e/utils.go b/e2e/utils.go index d1ad533dcd5..e2ef7950e8d 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -79,25 +79,26 @@ const ( var ( // cli flags. - deployTimeout int - deployCephFS bool - deployRBD bool - deployNFS bool - testCephFS bool - testRBD bool - testRBDFSCrypt bool - testNBD bool - testNFS bool - helmTest bool - upgradeTesting bool - upgradeVersion string - cephCSINamespace string - rookNamespace string - radosNamespace string - poll = 2 * time.Second - isOpenShift bool - clusterID string - nfsDriverName string + deployTimeout int + deployCephFS bool + deployRBD bool + deployNFS bool + testCephFS bool + testCephFSFscrypt bool + testRBD bool + testRBDFSCrypt bool + testNBD bool + testNFS bool + helmTest bool + upgradeTesting bool + upgradeVersion string + cephCSINamespace string + rookNamespace string + radosNamespace string + poll = 2 * time.Second + isOpenShift bool + clusterID string + nfsDriverName string ) type cephfsFilesystem struct { diff --git a/examples/cephfs/secret.yaml b/examples/cephfs/secret.yaml index 6cb1f8f8c5f..0eb3707811a 100644 --- a/examples/cephfs/secret.yaml +++ b/examples/cephfs/secret.yaml @@ -12,3 +12,6 @@ stringData: # Required for dynamically provisioned volumes adminID: adminKey: <Ceph auth key corresponding to ID above> + + # Encryption passphrase + encryptionPassphrase: test_passphrase diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml index b23fa0213ce..4dab9ea2dd4 100644 --- a/examples/cephfs/storageclass.yaml +++ b/examples/cephfs/storageclass.yaml @@ -52,6 +52,17 @@ parameters: # (defaults to `false`) # backingSnapshot: "true" + # (optional) Instruct the plugin it has to encrypt the volume + # By default it is disabled. Valid values are "true" or "false". + # A string is expected here, i.e. "true", not true. + # encrypted: "true" + + # (optional) Use external key management system for encryption passphrases by + # specifying a unique ID matching KMS ConfigMap. The ID is only used for + # correlation to configmap entry. 
+ # encryptionKMSID: <kms-config-id> + + reclaimPolicy: Delete allowVolumeExpansion: true mountOptions: diff --git a/examples/kms/vault/vault.yaml b/examples/kms/vault/vault.yaml index 379b8897a77..8621a8b7361 100644 --- a/examples/kms/vault/vault.yaml +++ b/examples/kms/vault/vault.yaml @@ -169,7 +169,7 @@ spec: - name: PLUGIN_ROLE value: csi-kubernetes - name: SERVICE_ACCOUNTS - value: rbd-csi-nodeplugin,rbd-csi-provisioner,csi-rbdplugin,csi-rbdplugin-provisioner + value: rbd-csi-nodeplugin,rbd-csi-provisioner,csi-rbdplugin,csi-rbdplugin-provisioner,cephfs-csi-nodeplugin,cephfs-csi-provisioner,csi-cephfsplugin,csi-cephfsplugin-provisioner - name: SERVICE_ACCOUNTS_NAMESPACE value: default - name: VAULT_ADDR diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go index 7d93e977b75..f8c46b4380b 100644 --- a/internal/cephfs/controllerserver.go +++ b/internal/cephfs/controllerserver.go @@ -26,6 +26,7 @@ import ( "github.com/ceph/ceph-csi/internal/cephfs/store" fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" csicommon "github.com/ceph/ceph-csi/internal/csi-common" + "github.com/ceph/ceph-csi/internal/kms" "github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/k8s" "github.com/ceph/ceph-csi/internal/util/log" @@ -66,18 +67,29 @@ func (cs *ControllerServer) createBackingVolume( ctx context.Context, volOptions, parentVolOpt *store.VolumeOptions, - pvID *store.VolumeIdentifier, + vID, pvID *store.VolumeIdentifier, sID *store.SnapshotIdentifier, + secrets map[string]string, ) error { var err error volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID, cs.ClusterName, cs.SetMetadata) if sID != nil { - return cs.createBackingVolumeFromSnapshotSource(ctx, volOptions, parentVolOpt, volClient, sID) + err = parentVolOpt.CopyEncryptionConfig(volOptions, sID.SnapshotID, vID.VolumeID) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + return cs.createBackingVolumeFromSnapshotSource(ctx, volOptions, parentVolOpt, volClient, sID, secrets) } if parentVolOpt != nil { + err = parentVolOpt.CopyEncryptionConfig(volOptions, pvID.VolumeID, vID.VolumeID) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + return cs.createBackingVolumeFromVolumeSource(ctx, parentVolOpt, volClient, pvID) } @@ -96,6 +108,7 @@ func (cs *ControllerServer) createBackingVolumeFromSnapshotSource( parentVolOpt *store.VolumeOptions, volClient core.SubVolumeClient, sID *store.SnapshotIdentifier, + secrets map[string]string, ) error { if err := cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil { log.ErrorLog(ctx, err.Error()) @@ -105,7 +118,7 @@ func (cs *ControllerServer) createBackingVolumeFromSnapshotSource( defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID) if volOptions.BackingSnapshot { - if err := store.AddSnapshotBackedVolumeRef(ctx, volOptions, cs.ClusterName, cs.SetMetadata); err != nil { + if err := store.AddSnapshotBackedVolumeRef(ctx, volOptions, cs.ClusterName, cs.SetMetadata, secrets); err != nil { log.ErrorLog(ctx, "failed to create snapshot-backed volume from snapshot %s: %v", sID.FsSnapshotName, err) @@ -162,7 +175,8 @@ func (cs *ControllerServer) checkContentSource( switch volumeSource.Type.(type) { case *csi.VolumeContentSource_Snapshot: snapshotID := req.VolumeContentSource.GetSnapshot().GetSnapshotId() - volOpt, _, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr, cs.ClusterName, cs.SetMetadata) + volOpt, _, sid, err := 
store.NewSnapshotOptionsFromID(ctx, snapshotID, cr, + req.GetSecrets(), cs.ClusterName, cs.SetMetadata) if err != nil { if errors.Is(err, cerrors.ErrSnapNotFound) { return nil, nil, nil, status.Error(codes.NotFound, err.Error()) @@ -294,6 +308,7 @@ func (cs *ControllerServer) CreateVolume( return nil, status.Error(codes.Internal, err.Error()) } + // TODO: return an error if the requested volume size is greater than the size of the found volume metadata := k8s.GetVolumeMetadata(req.GetParameters()) @@ -370,7 +385,7 @@ func (cs *ControllerServer) CreateVolume( }() // Create a volume - err = cs.createBackingVolume(ctx, volOptions, parentVol, pvID, sID) + err = cs.createBackingVolume(ctx, volOptions, parentVol, vID, pvID, sID, req.GetSecrets()) if err != nil { if cerrors.IsCloneRetryError(err) { return nil, status.Error(codes.Aborted, err.Error()) @@ -529,7 +544,7 @@ func (cs *ControllerServer) DeleteVolume( } defer cr.DeleteCredentials() - if err := cs.cleanUpBackingVolume(ctx, volOptions, vID, cr); err != nil { + if err := cs.cleanUpBackingVolume(ctx, volOptions, vID, cr, secrets); err != nil { return nil, err } @@ -547,7 +562,19 @@ func (cs *ControllerServer) cleanUpBackingVolume( volOptions *store.VolumeOptions, volID *store.VolumeIdentifier, cr *util.Credentials, + secrets map[string]string, ) error { + if volOptions.IsEncrypted() && volOptions.Encryption.KMS.RequiresDEKStore() == kms.DEKStoreIntegrated { + // Only remove the DEK when the KMS stores it itself. On a + // GetSecret-enabled KMS the DEKs are stored by + // fscrypt on the volume that is going to be deleted anyway. + log.DebugLog(ctx, "going to remove DEK for integrated store %q (fscrypt)", volOptions.Encryption.GetID()) + if err := volOptions.Encryption.RemoveDEK(volID.VolumeID); err != nil { + log.WarningLog(ctx, "failed to clean the passphrase for volume %q (file encryption): %s", + volOptions.VolID, err) + } + } + if !volOptions.BackingSnapshot { // Regular volumes need to be purged. @@ -585,7 +612,7 @@ func (cs *ControllerServer) cleanUpBackingVolume( } snapParentVolOptions, _, snapID, err := store.NewSnapshotOptionsFromID(ctx, - volOptions.BackingSnapshotID, cr, cs.ClusterName, cs.SetMetadata) + volOptions.BackingSnapshotID, cr, secrets, cs.ClusterName, cs.SetMetadata) if err != nil { absorbErrs := []error{ util.ErrPoolNotFound, @@ -874,6 +901,14 @@ func (cs *ControllerServer) CreateSnapshot( return nil, status.Error(codes.Internal, err.Error()) } + // Use the same encryption KMS as the source volume and copy the passphrase. 
The passphrase becomes + // available under the snapshot id for CreateVolume to use this snap as a backing volume + snapVolOptions := store.VolumeOptions{} + err = parentVolOptions.CopyEncryptionConfig(&snapVolOptions, sourceVolID, sID.SnapshotID) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + return &csi.CreateSnapshotResponse{ Snapshot: &csi.Snapshot{ SizeBytes: info.BytesQuota, @@ -991,7 +1026,8 @@ func (cs *ControllerServer) DeleteSnapshot( } defer cs.OperationLocks.ReleaseDeleteLock(snapshotID) - volOpt, snapInfo, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr, cs.ClusterName, cs.SetMetadata) + volOpt, snapInfo, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr, + req.GetSecrets(), cs.ClusterName, cs.SetMetadata) if err != nil { switch { case errors.Is(err, util.ErrPoolNotFound): diff --git a/internal/cephfs/nodeserver.go b/internal/cephfs/nodeserver.go index 7d7e536d53a..e957d260c29 100644 --- a/internal/cephfs/nodeserver.go +++ b/internal/cephfs/nodeserver.go @@ -30,6 +30,7 @@ import ( fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" csicommon "github.com/ceph/ceph-csi/internal/csi-common" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/fscrypt" "github.com/ceph/ceph-csi/internal/util/log" "github.com/container-storage-interface/spec/lib/go/csi" @@ -88,7 +89,7 @@ func (ns *NodeServer) getVolumeOptions( return nil, status.Error(codes.Internal, err.Error()) } - volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), volContext) + volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), volContext, volSecrets) if err != nil { if !errors.Is(err, cerrors.ErrNonStaticVolume) { return nil, status.Error(codes.Internal, err.Error()) @@ -118,6 +119,39 @@ func validateSnapshotBackedVolCapability(volCap *csi.VolumeCapability) error { return nil } +// maybeUnlockFileEncryption unlocks fscrypt on stagingTargetPath, if volOptions enable encryption. +func maybeUnlockFileEncryption( + ctx context.Context, + volOptions *store.VolumeOptions, + stagingTargetPath string, + volID fsutil.VolumeID, +) error { + if volOptions.IsEncrypted() { + log.DebugLog(ctx, "cephfs: unlocking fscrypt on volume %q path %s", volID, stagingTargetPath) + + return fscrypt.Unlock(ctx, volOptions.Encryption, stagingTargetPath, string(volID)) + } + + return nil +} + +// maybeInitializeFileEncryption initializes KMS and node specifics, if volContext enables encryption. +func maybeInitializeFileEncryption( + ctx context.Context, + mnt mounter.VolumeMounter, + volOptions *store.VolumeOptions, +) error { + if volOptions.IsEncrypted() { + if _, isFuse := mnt.(*mounter.FuseMounter); isFuse { + return errors.New("FUSE mounter does not support encryption") + } + + return fscrypt.InitializeNode(ctx) + } + + return nil +} + // NodeStageVolume mounts the volume to a staging path on the node. 
func (ns *NodeServer) NodeStageVolume( ctx context.Context, @@ -170,6 +204,11 @@ func (ns *NodeServer) NodeStageVolume( return nil, status.Error(codes.Internal, err.Error()) } + err = maybeInitializeFileEncryption(ctx, mnt, volOptions) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // Check if the volume is already mounted if err = ns.tryRestoreFuseMountInNodeStage(ctx, mnt, stagingTargetPath); err != nil { @@ -185,6 +224,9 @@ func (ns *NodeServer) NodeStageVolume( if isMnt { log.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath) + if err = maybeUnlockFileEncryption(ctx, volOptions, stagingTargetPath, volID); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } return &csi.NodeStageVolumeResponse{}, nil } @@ -205,6 +247,10 @@ func (ns *NodeServer) NodeStageVolume( log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath) + if err = maybeUnlockFileEncryption(ctx, volOptions, stagingTargetPath, volID); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if _, isFuse := mnt.(*mounter.FuseMounter); isFuse { // FUSE mount recovery needs NodeStageMountinfo records. @@ -452,6 +498,16 @@ func (ns *NodeServer) NodePublishVolume( } // It's not, mount now + encrypted, err := store.IsEncrypted(ctx, req.GetVolumeContext()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if encrypted { + stagingTargetPath = fscrypt.AppendEncyptedSubdirectory(stagingTargetPath) + if err = fscrypt.IsDirectoryUnlocked(stagingTargetPath, "ceph"); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if err = mounter.BindMount( ctx, diff --git a/internal/cephfs/store/backingsnapshot.go b/internal/cephfs/store/backingsnapshot.go index b57321ceb81..11d3cb1f07c 100644 --- a/internal/cephfs/store/backingsnapshot.go +++ b/internal/cephfs/store/backingsnapshot.go @@ -36,6 +36,7 @@ func AddSnapshotBackedVolumeRef( volOptions *VolumeOptions, clusterName string, setMetadata bool, + secrets map[string]string, ) error { ioctx, err := volOptions.conn.GetIoctx(volOptions.MetadataPool) if err != nil { @@ -98,7 +99,7 @@ func AddSnapshotBackedVolumeRef( // deleting the backing snapshot. Make sure the snapshot still exists by // trying to retrieve it again. 
_, _, _, err = NewSnapshotOptionsFromID(ctx, - volOptions.BackingSnapshotID, volOptions.conn.Creds, clusterName, setMetadata) + volOptions.BackingSnapshotID, volOptions.conn.Creds, secrets, clusterName, setMetadata) if err != nil { log.ErrorLog(ctx, "failed to get backing snapshot %s: %v", volOptions.BackingSnapshotID, err) } diff --git a/internal/cephfs/store/fsjournal.go b/internal/cephfs/store/fsjournal.go index daf328f1a20..b0a3bb5b14e 100644 --- a/internal/cephfs/store/fsjournal.go +++ b/internal/cephfs/store/fsjournal.go @@ -90,8 +90,10 @@ func CheckVolExists(ctx context.Context, } defer j.Destroy() + kmsID, encryptionType := getEncryptionConfig(volOptions) + imageData, err := j.CheckReservation( - ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", "", util.EncryptionTypeNone) + ctx, volOptions.MetadataPool, volOptions.RequestName, volOptions.NamePrefix, "", kmsID, encryptionType) if err != nil { return nil, err } @@ -249,6 +251,14 @@ func updateTopologyConstraints(volOpts *VolumeOptions) error { return nil } +func getEncryptionConfig(volOptions *VolumeOptions) (string, util.EncryptionType) { + if volOptions.IsEncrypted() { + return volOptions.Encryption.GetID(), util.EncryptionTypeFile + } + + return "", util.EncryptionTypeNone +} + // ReserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and, // to generate the volume identifier for the reserved UUID. func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[string]string) (*VolumeIdentifier, error) { @@ -276,10 +286,13 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin } defer j.Destroy() + kmsID, encryptionType := getEncryptionConfig(volOptions) + imageUUID, vid.FsSubvolName, err = j.ReserveName( ctx, volOptions.MetadataPool, util.InvalidPoolID, volOptions.MetadataPool, util.InvalidPoolID, volOptions.RequestName, - volOptions.NamePrefix, "", "", volOptions.ReservedID, "", volOptions.BackingSnapshotID, util.EncryptionTypeNone) + volOptions.NamePrefix, "", kmsID, volOptions.ReservedID, volOptions.Owner, + volOptions.BackingSnapshotID, encryptionType) if err != nil { return nil, err } @@ -319,10 +332,13 @@ func ReserveSnap( } defer j.Destroy() + kmsID, encryptionType := getEncryptionConfig(volOptions) + imageUUID, vid.FsSnapshotName, err = j.ReserveName( ctx, volOptions.MetadataPool, util.InvalidPoolID, volOptions.MetadataPool, util.InvalidPoolID, snap.RequestName, - snap.NamePrefix, parentSubVolName, "", snap.ReservedID, "", "", util.EncryptionTypeNone) + snap.NamePrefix, parentSubVolName, kmsID, snap.ReservedID, "", + volOptions.Owner, encryptionType) if err != nil { return nil, err } @@ -390,8 +406,10 @@ func CheckSnapExists( } defer j.Destroy() + kmsID, encryptionType := getEncryptionConfig(volOptions) + snapData, err := j.CheckReservation( - ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "", util.EncryptionTypeNone) + ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, kmsID, encryptionType) if err != nil { return nil, nil, err } diff --git a/internal/cephfs/store/volumeoptions.go b/internal/cephfs/store/volumeoptions.go index 044f1c7fd27..24e96655cf7 100644 --- a/internal/cephfs/store/volumeoptions.go +++ b/internal/cephfs/store/volumeoptions.go @@ -29,10 +29,16 @@ import ( "github.com/ceph/ceph-csi/internal/cephfs/core" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" + kmsapi 
"github.com/ceph/ceph-csi/internal/kms" "github.com/ceph/ceph-csi/internal/util" + "github.com/ceph/ceph-csi/internal/util/k8s" "github.com/ceph/ceph-csi/internal/util/log" ) +const ( + cephfsDefaultEncryptionType = util.EncryptionTypeFile +) + type VolumeOptions struct { core.SubVolume @@ -55,6 +61,11 @@ type VolumeOptions struct { Topology map[string]string FscID int64 + // Encryption provides access to optional VolumeEncryption functions + Encryption *util.VolumeEncryption + // Owner is the creator (tenant, Kubernetes Namespace) of the volume + Owner string + // conn is a connection to the Ceph cluster obtained from a ConnPool conn *util.ClusterConnection @@ -84,6 +95,9 @@ func (vo *VolumeOptions) Destroy() { if vo.conn != nil { vo.conn.Destroy() } + if vo.IsEncrypted() { + vo.Encryption.Destroy() + } } func validateNonEmptyField(field, fieldName string) error { @@ -196,6 +210,7 @@ func fmtBackingSnapshotOptionMismatch(optName, expected, actual string) error { // NewVolumeOptions generates a new instance of volumeOptions from the provided // CSI request parameters. +// nolint:gocyclo,cyclop // TODO: reduce complexity func NewVolumeOptions( ctx context.Context, requestName, @@ -219,6 +234,7 @@ func NewVolumeOptions( opts.ClusterID = clusterData.ClusterID opts.Monitors = strings.Join(clusterData.Monitors, ",") opts.SubvolumeGroup = clusterData.CephFS.SubvolumeGroup + opts.Owner = k8s.GetOwner(volOptions) if err = extractOptionalOption(&opts.Pool, "pool", volOptions); err != nil { return nil, err @@ -248,6 +264,10 @@ func NewVolumeOptions( return nil, err } + if err = opts.InitKMS(ctx, volOptions, req.GetSecrets()); err != nil { + return nil, fmt.Errorf("failed to init KMS: %w", err) + } + if backingSnapshotBool != "" { if opts.BackingSnapshot, err = strconv.ParseBool(backingSnapshotBool); err != nil { return nil, fmt.Errorf("failed to parse backingSnapshot: %w", err) @@ -294,7 +314,7 @@ func NewVolumeOptions( opts.BackingSnapshotID = req.GetVolumeContentSource().GetSnapshot().GetSnapshotId() - err = opts.populateVolumeOptionsFromBackingSnapshot(ctx, cr, clusterName, setMetadata) + err = opts.populateVolumeOptionsFromBackingSnapshot(ctx, cr, req.GetSecrets(), clusterName, setMetadata) if err != nil { return nil, err } @@ -305,6 +325,7 @@ func NewVolumeOptions( // newVolumeOptionsFromVolID generates a new instance of volumeOptions and VolumeIdentifier // from the provided CSI VolumeID. 
+// nolint:gocyclo,cyclop // TODO: reduce complexity func NewVolumeOptionsFromVolID( ctx context.Context, volID string, @@ -382,6 +403,7 @@ func NewVolumeOptionsFromVolID( } volOptions.RequestName = imageAttributes.RequestName vid.FsSubvolName = imageAttributes.ImageName + volOptions.Owner = imageAttributes.Owner if volOpt != nil { if err = extractOptionalOption(&volOptions.Pool, "pool", volOpt); err != nil { @@ -403,6 +425,10 @@ func NewVolumeOptionsFromVolID( if err = extractMounter(&volOptions.Mounter, volOpt); err != nil { return nil, nil, err } + + if err = volOptions.InitKMS(ctx, volOpt, secrets); err != nil { + return nil, nil, err + } } if imageAttributes.BackingSnapshotID != "" || volOptions.BackingSnapshotID != "" { @@ -414,11 +440,18 @@ func NewVolumeOptionsFromVolID( volOptions.SubVolume.VolID = vid.FsSubvolName if volOptions.BackingSnapshot { - err = volOptions.populateVolumeOptionsFromBackingSnapshot(ctx, cr, clusterName, setMetadata) + err = volOptions.populateVolumeOptionsFromBackingSnapshot(ctx, cr, secrets, clusterName, setMetadata) } else { err = volOptions.populateVolumeOptionsFromSubvolume(ctx, clusterName, setMetadata) } + if volOpt == nil && imageAttributes.KmsID != "" && volOptions.Encryption == nil { + err = volOptions.ConfigureEncryption(ctx, imageAttributes.KmsID, secrets) + if err != nil { + return &volOptions, &vid, err + } + } + return &volOptions, &vid, err } @@ -447,6 +480,7 @@ func (vo *VolumeOptions) populateVolumeOptionsFromSubvolume( func (vo *VolumeOptions) populateVolumeOptionsFromBackingSnapshot( ctx context.Context, cr *util.Credentials, + secrets map[string]string, clusterName string, setMetadata bool, ) error { @@ -471,7 +505,7 @@ func (vo *VolumeOptions) populateVolumeOptionsFromBackingSnapshot( } parentBackingSnapVolOpts, _, snapID, err := NewSnapshotOptionsFromID(ctx, - vo.BackingSnapshotID, cr, clusterName, setMetadata) + vo.BackingSnapshotID, cr, secrets, clusterName, setMetadata) if err != nil { return fmt.Errorf("failed to retrieve backing snapshot %s: %w", vo.BackingSnapshotID, err) } @@ -576,6 +610,11 @@ func NewVolumeOptionsFromMonitorList( return nil, nil, err } + opts.Owner = k8s.GetOwner(options) + if err = opts.InitKMS(context.TODO(), options, secrets); err != nil { + return nil, nil, err + } + vid.FsSubvolName = volID vid.VolumeID = volID @@ -591,7 +630,7 @@ func NewVolumeOptionsFromMonitorList( // detected to be a statically provisioned volume. 
func NewVolumeOptionsFromStaticVolume( volID string, - options map[string]string, + options, secrets map[string]string, ) (*VolumeOptions, *VolumeIdentifier, error) { var ( opts VolumeOptions @@ -625,6 +664,7 @@ func NewVolumeOptionsFromStaticVolume( opts.ClusterID = clusterData.ClusterID opts.Monitors = strings.Join(clusterData.Monitors, ",") opts.SubvolumeGroup = clusterData.CephFS.SubvolumeGroup + opts.Owner = k8s.GetOwner(options) if err = extractOption(&opts.RootPath, "rootPath", options); err != nil { return nil, nil, err @@ -650,6 +690,10 @@ func NewVolumeOptionsFromStaticVolume( return nil, nil, err } + if err = opts.InitKMS(context.TODO(), options, secrets); err != nil { + return nil, nil, err + } + vid.FsSubvolName = opts.RootPath vid.VolumeID = volID @@ -666,6 +710,7 @@ func NewSnapshotOptionsFromID( ctx context.Context, snapID string, cr *util.Credentials, + secrets map[string]string, clusterName string, setMetadata bool, ) (*VolumeOptions, *core.SnapshotInfo, *SnapshotIdentifier, error) { @@ -739,8 +784,16 @@ func NewSnapshotOptionsFromID( sid.FsSubvolName = imageAttributes.SourceName volOptions.SubVolume.VolID = sid.FsSubvolName + volOptions.Owner = imageAttributes.Owner vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID, clusterName, setMetadata) + if imageAttributes.KmsID != "" && volOptions.Encryption == nil { + err = volOptions.ConfigureEncryption(ctx, imageAttributes.KmsID, secrets) + if err != nil { + return &volOptions, nil, &sid, err + } + } + subvolInfo, err := vol.GetSubVolumeInfo(ctx) if err != nil { return &volOptions, nil, &sid, err @@ -788,3 +841,139 @@ func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (*S return cephfsSnap, nil } + +func parseEncryptionOpts(volOptions map[string]string) (string, util.EncryptionType, error) { + var ( + err error + ok bool + encrypted, kmsID string + ) + encrypted, ok = volOptions["encrypted"] + if !ok { + return "", util.EncryptionTypeNone, nil + } + kmsID, err = util.FetchEncryptionKMSID(encrypted, volOptions["encryptionKMSID"]) + if err != nil { + return "", util.EncryptionTypeInvalid, err + } + + encType := util.FetchEncryptionType(volOptions, cephfsDefaultEncryptionType) + + return kmsID, encType, nil +} + +// IsEncrypted returns true if volOptions enables file encryption. +func IsEncrypted(ctx context.Context, volOptions map[string]string) (bool, error) { + _, encType, err := parseEncryptionOpts(volOptions) + if err != nil { + return false, err + } + + return encType == util.EncryptionTypeFile, nil +} + +// CopyEncryptionConfig copies passphrases and initializes a fresh +// Encryption struct if necessary from (vo, vID) to (cp, cpVID). +func (vo *VolumeOptions) CopyEncryptionConfig(cp *VolumeOptions, vID, cpVID string) error { + var err error + + if !vo.IsEncrypted() { + return nil + } + + if vID == cpVID { + return fmt.Errorf("BUG: %v and %v have the same VolID %q "+ + "set!? 
Call stack: %s", vo, cp, vID, util.CallStack()) + } + + if cp.Encryption == nil { + cp.Encryption, err = util.NewVolumeEncryption(vo.Encryption.GetID(), vo.Encryption.KMS) + if errors.Is(err, util.ErrDEKStoreNeeded) { + _, err := vo.Encryption.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + return err + } + } + } + + if vo.Encryption.KMS.RequiresDEKStore() == kmsapi.DEKStoreIntegrated { + passphrase, err := vo.Encryption.GetCryptoPassphrase(vID) + if err != nil { + return fmt.Errorf("failed to fetch passphrase for %q (%+v): %w", + vID, vo, err) + } + + err = cp.Encryption.StoreCryptoPassphrase(cpVID, passphrase) + if err != nil { + return fmt.Errorf("failed to store passphrase for %q (%+v): %w", + cpVID, cp, err) + } + } + + return nil +} + +// ConfigureEncryption initializes the Ceph CSI key management from +// kmsID and credentials. Sets vo.Encryption on success. +func (vo *VolumeOptions) ConfigureEncryption( + ctx context.Context, + kmsID string, + credentials map[string]string, +) error { + kms, err := kmsapi.GetKMS(vo.Owner, kmsID, credentials) + if err != nil { + log.ErrorLog(ctx, "get KMS failed %+v: %v", vo, err) + + return err + } + + vo.Encryption, err = util.NewVolumeEncryption(kmsID, kms) + + if errors.Is(err, util.ErrDEKStoreNeeded) { + // fscrypt uses secrets directly from the KMS. + // Therefore we do not support an additional DEK + // store. Since not all "metadata" KMS support + // GetSecret, test for support here. Postpone any + // other error handling + _, err := vo.Encryption.KMS.GetSecret("") + if errors.Is(err, kmsapi.ErrGetSecretUnsupported) { + return err + } + } + + return nil +} + +// InitKMS initialized the Ceph CSI key management by parsing the +// configuration from volume options + credentials. Sets vo.Encryption +// on success. +func (vo *VolumeOptions) InitKMS( + ctx context.Context, + volOptions, credentials map[string]string, +) error { + var err error + + kmsID, encType, err := parseEncryptionOpts(volOptions) + if err != nil { + return err + } + + if encType == util.EncryptionTypeNone { + return nil + } + + if encType != util.EncryptionTypeFile { + return fmt.Errorf("unsupported encryption type %v. only supported type is 'file'", encType) + } + + err = vo.ConfigureEncryption(ctx, kmsID, credentials) + if err != nil { + return fmt.Errorf("invalid encryption kms configuration: %w", err) + } + + return nil +} + +func (vo *VolumeOptions) IsEncrypted() bool { + return vo.Encryption != nil +}