From 863d9b2514b7fca1e3f3067c9a141dccbd9a5a3c Mon Sep 17 00:00:00 2001 From: Xun Jiang Date: Wed, 15 May 2024 10:11:22 +0800 Subject: [PATCH] Modify the volume helper logic. Signed-off-by: Xun Jiang --- changelogs/unreleased/7794-blackpiglet | 1 + internal/volumehelper/volume_policy_helper.go | 82 ++++++++++++------- .../volumehelper/volume_policy_helper_test.go | 73 +++++++++++------ pkg/backup/actions/csi/pvc_action.go | 8 -- pkg/backup/actions/csi/pvc_action_test.go | 5 -- pkg/backup/backup.go | 15 ++++ pkg/backup/backup_test.go | 25 ++++-- pkg/backup/item_backupper.go | 74 ++++++++--------- pkg/restore/pv_restorer.go | 6 -- pkg/restore/pv_restorer_test.go | 1 - pkg/restore/restore.go | 1 - 11 files changed, 172 insertions(+), 119 deletions(-) create mode 100644 changelogs/unreleased/7794-blackpiglet diff --git a/changelogs/unreleased/7794-blackpiglet b/changelogs/unreleased/7794-blackpiglet new file mode 100644 index 00000000000..610fee30bfb --- /dev/null +++ b/changelogs/unreleased/7794-blackpiglet @@ -0,0 +1 @@ +Modify the volume helper logic. 
\ No newline at end of file diff --git a/internal/volumehelper/volume_policy_helper.go b/internal/volumehelper/volume_policy_helper.go index a1929846f2e..98c6cb32ddb 100644 --- a/internal/volumehelper/volume_policy_helper.go +++ b/internal/volumehelper/volume_policy_helper.go @@ -6,7 +6,7 @@ import ( "github.com/vmware-tanzu/velero/internal/resourcepolicies" - kbclient "sigs.k8s.io/controller-runtime/pkg/client" + crclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/pkg/util/boolptr" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -21,17 +21,38 @@ import ( ) type VolumeHelper interface { - GetVolumesForFSBackup(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool, kbclient kbclient.Client) ([]string, []string, error) - ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource, kbclient kbclient.Client) (bool, error) + GetVolumesForFSBackup(pod *corev1api.Pod) ([]string, []string, error) + ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error) } -type VolumeHelperImpl struct { - VolumePolicy *resourcepolicies.Policies - SnapshotVolumes *bool - Logger logrus.FieldLogger +type volumeHelperImpl struct { + volumePolicy *resourcepolicies.Policies + snapshotVolumes *bool + logger logrus.FieldLogger + client crclient.Client + defaultVolumesToFSBackup bool + backupExcludePVC bool } -func (v *VolumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource, kbclient kbclient.Client) (bool, error) { +func NewVolumeHelperImpl( + volumePolicy *resourcepolicies.Policies, + snapshotVolumes *bool, + logger logrus.FieldLogger, + client crclient.Client, + defaultVolumesToFSBackup bool, + backupExcludePVC bool, +) VolumeHelper { + return &volumeHelperImpl{ + volumePolicy: volumePolicy, + snapshotVolumes: snapshotVolumes, + logger: logger, + client: client, + defaultVolumesToFSBackup: defaultVolumesToFSBackup, + 
backupExcludePVC: backupExcludePVC, + } +} + +func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, groupResource schema.GroupResource) (bool, error) { // check if volume policy exists and also check if the object(pv/pvc) fits a volume policy criteria and see if the associated action is snapshot // if it is not snapshot then skip the code path for snapshotting the PV/PVC pvc := new(corev1api.PersistentVolumeClaim) @@ -43,7 +64,7 @@ func (v *VolumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group return false, err } - pv, err = kubeutil.GetPVForPVC(pvc, kbclient) + pv, err = kubeutil.GetPVForPVC(pvc, v.client) if err != nil { return false, err } @@ -55,38 +76,43 @@ func (v *VolumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group } } - if v.VolumePolicy != nil { - action, err := v.VolumePolicy.GetMatchAction(pv) + if v.volumePolicy != nil { + action, err := v.volumePolicy.GetMatchAction(pv) if err != nil { return false, err } // Also account for SnapshotVolumes flag on backup CR - if action != nil && action.Type == resourcepolicies.Snapshot && boolptr.IsSetToTrue(v.SnapshotVolumes) { - v.Logger.Infof(fmt.Sprintf("performing snapshot action for pv %s as it satisfies the volume policy criteria and snapshotVolumes is set to true", pv.Name)) + if action != nil && action.Type == resourcepolicies.Snapshot { + v.logger.Infof(fmt.Sprintf("performing snapshot action for pv %s as it satisfies the volume policy criteria", pv.Name)) return true, nil } - v.Logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s possibly due to not satisfying the volume policy criteria or snapshotVolumes is not true", pv.Name)) + v.logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s due to not satisfying the volume policy criteria", pv.Name)) return false, nil + } else if !boolptr.IsSetToFalse(v.snapshotVolumes) { + // If the backup.Spec.SnapshotVolumes is not set, or set to true, then should take the snapshot. 
+ v.logger.Infof("performing snapshot action for pv %s as volume policy is nil, but the snapshotVolumes is set to true", pv.Name) + return true, nil } + v.logger.Infof(fmt.Sprintf("skipping snapshot action for pv %s possibly due to no volume policy setting and snapshotVolumes is false", pv.Name)) return false, nil } -func (v *VolumeHelperImpl) GetVolumesForFSBackup(pod *corev1api.Pod, defaultVolumesToFsBackup, backupExcludePVC bool, kbclient kbclient.Client) ([]string, []string, error) { +func (v *volumeHelperImpl) GetVolumesForFSBackup(pod *corev1api.Pod) ([]string, []string, error) { // Check if there is a fs-backup/snapshot volume policy specified by the user, if yes then use the volume policy approach to // get the list volumes for fs-backup else go via the legacy annotation based approach var fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, volsToProcessByLegacyApproach = make([]string, 0), make([]string, 0), make([]string, 0) var err error - if v.VolumePolicy != nil { + if v.volumePolicy != nil { // Get the list of volumes to back up using pod volume backup for the given pod matching fs-backup volume policy action - v.Logger.Infof("Volume Policy specified by the user, using volume policy approach to segregate pod volumes for fs-backup") - // GetVolumesMatchingFSBackupAction return 3 list of Volumes: + v.logger.Infof("Volume Policy specified by the user, using volume policy approach to segregate pod volumes for fs-backup") + // getVolumesMatchingFSBackupAction return 3 list of Volumes: // fsBackupVolumePolicyVols: Volumes that have a matching fs-backup action from the volume policy specified by the user // nonFsBackupVolumePolicyVols: Volumes that have an action matching from the volume policy specified by the user, but it is not fs-backup action // volsToProcessByLegacyApproach: Volumes that did not have any matching action i.e.
action was nil from the volume policy specified by the user, these volumes will be processed via the legacy annotations based approach (fallback option) - fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, volsToProcessByLegacyApproach, err = v.GetVolumesMatchingFSBackupAction(pod, v.VolumePolicy, backupExcludePVC, kbclient) + fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, volsToProcessByLegacyApproach, err = v.getVolumesMatchingFSBackupAction(pod, v.volumePolicy, v.backupExcludePVC, v.client) if err != nil { return fsBackupVolumePolicyVols, nonFsBackupVolumePolicyVols, err } @@ -99,9 +125,9 @@ func (v *VolumeHelperImpl) GetVolumesForFSBackup(pod *corev1api.Pod, defaultVolu // process legacy annotation based approach, this will done when: // 1. volume policy os specified by the user // 2. And there are some volumes for which the volume policy approach did not get any supported matching actions - if v.VolumePolicy != nil && len(volsToProcessByLegacyApproach) > 0 { - v.Logger.Infof("volume policy specified by the user but there are volumes with no matching action, using legacy approach based on annotations as a fallback for those volumes") - includedVolumesFromLegacyFallBack, optedOutVolumesFromLegacyFallBack := pdvolumeutil.GetVolumesByPod(pod, defaultVolumesToFsBackup, backupExcludePVC, volsToProcessByLegacyApproach) + if v.volumePolicy != nil && len(volsToProcessByLegacyApproach) > 0 { + v.logger.Infof("volume policy specified by the user but there are volumes with no matching action, using legacy approach based on annotations as a fallback for those volumes") + includedVolumesFromLegacyFallBack, optedOutVolumesFromLegacyFallBack := pdvolumeutil.GetVolumesByPod(pod, v.defaultVolumesToFSBackup, v.backupExcludePVC, volsToProcessByLegacyApproach) // merge the volumePolicy approach and legacy Fallback lists fsBackupVolumePolicyVols = append(fsBackupVolumePolicyVols, includedVolumesFromLegacyFallBack...) 
nonFsBackupVolumePolicyVols = append(nonFsBackupVolumePolicyVols, optedOutVolumesFromLegacyFallBack...) @@ -111,19 +137,19 @@ func (v *VolumeHelperImpl) GetVolumesForFSBackup(pod *corev1api.Pod, defaultVolu // Get the list of volumes to back up using pod volume backup from the pod's annotations. // We will also pass the list of volume that did not have any supported volume policy action matched in legacy approach so that // those volumes get processed via legacy annotation based approach, this is a fallback option on annotation based legacy approach - v.Logger.Infof("fs-backup or snapshot Volume Policy not specified by the user, using legacy approach based on annotations") - includedVolumes, optedOutVolumes := pdvolumeutil.GetVolumesByPod(pod, defaultVolumesToFsBackup, backupExcludePVC, volsToProcessByLegacyApproach) + v.logger.Infof("fs-backup or snapshot Volume Policy not specified by the user, using legacy approach based on annotations") + includedVolumes, optedOutVolumes := pdvolumeutil.GetVolumesByPod(pod, v.defaultVolumesToFSBackup, v.backupExcludePVC, volsToProcessByLegacyApproach) return includedVolumes, optedOutVolumes, nil } -// GetVolumesMatchingFSBackupAction returns a list of volume names to backup for the provided pod having fs-backup volume policy action -func (v *VolumeHelperImpl) GetVolumesMatchingFSBackupAction(pod *corev1api.Pod, volumePolicies *resourcepolicies.Policies, backupExcludePVC bool, kbclient kbclient.Client) ([]string, []string, []string, error) { +// getVolumesMatchingFSBackupAction returns a list of volume names to backup for the provided pod having fs-backup volume policy action +func (v *volumeHelperImpl) getVolumesMatchingFSBackupAction(pod *corev1api.Pod, volumePolicies *resourcepolicies.Policies, backupExcludePVC bool, kbclient crclient.Client) ([]string, []string, []string, error) { FSBackupActionMatchingVols := []string{} FSBackupNonActionMatchingVols := []string{} NoActionMatchingVols := []string{} for i, vol := range 
pod.Spec.Volumes { - if !v.ShouldIncludeVolumeInBackup(vol, backupExcludePVC) { + if !v.shouldIncludeVolumeInBackup(vol, backupExcludePVC) { continue } @@ -163,7 +189,7 @@ func (v *VolumeHelperImpl) GetVolumesMatchingFSBackupAction(pod *corev1api.Pod, return FSBackupActionMatchingVols, FSBackupNonActionMatchingVols, NoActionMatchingVols, nil } -func (v *VolumeHelperImpl) ShouldIncludeVolumeInBackup(vol corev1api.Volume, backupExcludePVC bool) bool { +func (v *volumeHelperImpl) shouldIncludeVolumeInBackup(vol corev1api.Volume, backupExcludePVC bool) bool { includeVolumeInBackup := true // cannot backup hostpath volumes as they are not mounted into /var/lib/kubelet/pods // and therefore not accessible to the node agent daemon set. diff --git a/internal/volumehelper/volume_policy_helper_test.go b/internal/volumehelper/volume_policy_helper_test.go index b66cb2dfaf2..121a47b0be9 100644 --- a/internal/volumehelper/volume_policy_helper_test.go +++ b/internal/volumehelper/volume_policy_helper_test.go @@ -1,3 +1,19 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package volumehelper import ( @@ -93,7 +109,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) { expectedErr: false, }, { - name: "Given PV object matches volume policy snapshot action snapshotVolumes flags is false returns false and no error", + name: "Given PV object matches volume policy snapshot action snapshotVolumes flags is false returns true and no error", obj: PVObjectGP2, groupResource: kuberesource.PersistentVolumes, resourcePolicies: resourcepolicies.ResourcePolicies{ @@ -110,7 +126,7 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) { }, }, snapshotVolumesFlag: pointer.Bool(false), - shouldSnapshot: false, + shouldSnapshot: true, expectedErr: false, }, { @@ -203,12 +219,13 @@ func TestVolumeHelperImpl_ShouldPerformSnapshot(t *testing.T) { if err != nil { t.Fatalf("failed to build policy with error %v", err) } - vh := &VolumeHelperImpl{ - VolumePolicy: p, - SnapshotVolumes: tc.snapshotVolumesFlag, - Logger: velerotest.NewLogger(), + vh := &volumeHelperImpl{ + volumePolicy: p, + snapshotVolumes: tc.snapshotVolumesFlag, + logger: velerotest.NewLogger(), + client: fakeClient, } - ActualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(tc.obj, tc.groupResource, fakeClient) + ActualShouldSnapshot, actualError := vh.ShouldPerformSnapshot(tc.obj, tc.groupResource) if tc.expectedErr { assert.NotNil(t, actualError, "Want error; Got nil error") return @@ -354,12 +371,13 @@ func TestVolumeHelperImpl_ShouldIncludeVolumeInBackup(t *testing.T) { if err != nil { t.Fatalf("failed to build policy with error %v", err) } - vh := &VolumeHelperImpl{ - VolumePolicy: p, - SnapshotVolumes: pointer.Bool(true), - Logger: velerotest.NewLogger(), + vh := &volumeHelperImpl{ + volumePolicy: p, + snapshotVolumes: pointer.Bool(true), + logger: velerotest.NewLogger(), + backupExcludePVC: tc.backupExcludePVC, } - actualShouldInclude := vh.ShouldIncludeVolumeInBackup(tc.vol, tc.backupExcludePVC) + actualShouldInclude := 
vh.shouldIncludeVolumeInBackup(tc.vol, vh.backupExcludePVC) assert.Equalf(t, actualShouldInclude, tc.shouldInclude, "Want shouldInclude as %v; Got actualShouldInclude as %v", tc.shouldInclude, actualShouldInclude) }) } @@ -610,14 +628,18 @@ func TestVolumeHelperImpl_GetVolumesMatchingFSBackupAction(t *testing.T) { if err != nil { t.Fatalf("failed to build policy with error %v", err) } - vh := &VolumeHelperImpl{ - VolumePolicy: p, - SnapshotVolumes: pointer.Bool(true), - Logger: velerotest.NewLogger(), - } + objs := []runtime.Object{samplePod, samplePVC1, samplePVC2, samplePV1, samplePV2} fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...) - gotFSBackupActionMatchingVols, gotFSBackupNonActionMatchingVols, gotNoActionMatchingVols, actualError := vh.GetVolumesMatchingFSBackupAction(samplePod, vh.VolumePolicy, tc.backupExcludePVC, fakeClient) + + vh := &volumeHelperImpl{ + volumePolicy: p, + snapshotVolumes: pointer.Bool(true), + logger: velerotest.NewLogger(), + client: fakeClient, + backupExcludePVC: tc.backupExcludePVC, + } + gotFSBackupActionMatchingVols, gotFSBackupNonActionMatchingVols, gotNoActionMatchingVols, actualError := vh.getVolumesMatchingFSBackupAction(samplePod, vh.volumePolicy, vh.backupExcludePVC, vh.client) if tc.expectedErr { assert.NotNil(t, actualError, "Want error; Got nil error") return @@ -681,14 +703,17 @@ func TestVolumeHelperImpl_GetVolumesForFSBackup(t *testing.T) { if err != nil { t.Fatalf("failed to build policy with error %v", err) } - vh := &VolumeHelperImpl{ - VolumePolicy: p, - SnapshotVolumes: pointer.Bool(true), - Logger: velerotest.NewLogger(), - } objs := []runtime.Object{samplePod, samplePVC1, samplePVC2, samplePV1, samplePV2} fakeClient := velerotest.NewFakeControllerRuntimeClient(t, objs...) 
- gotIncludedVolumes, gotOptedOutVolumes, actualError := vh.GetVolumesForFSBackup(samplePod, tc.defaultVolumesToFsBackup, tc.backupExcludePVC, fakeClient) + vh := &volumeHelperImpl{ + volumePolicy: p, + snapshotVolumes: pointer.Bool(true), + logger: velerotest.NewLogger(), + client: fakeClient, + defaultVolumesToFSBackup: tc.defaultVolumesToFsBackup, + backupExcludePVC: tc.backupExcludePVC, + } + gotIncludedVolumes, gotOptedOutVolumes, actualError := vh.GetVolumesForFSBackup(samplePod) if tc.expectedErr { assert.NotNil(t, actualError, "Want error; Got nil error") return diff --git a/pkg/backup/actions/csi/pvc_action.go b/pkg/backup/actions/csi/pvc_action.go index 1417e912062..33ae80ff6e4 100644 --- a/pkg/backup/actions/csi/pvc_action.go +++ b/pkg/backup/actions/csi/pvc_action.go @@ -64,14 +64,6 @@ func (p *pvcBackupItemAction) AppliesTo() (velero.ResourceSelector, error) { } func (p *pvcBackupItemAction) validateBackup(backup velerov1api.Backup) (valid bool) { - // Do nothing if volume snapshots have not been requested in this backup - if boolptr.IsSetToFalse(backup.Spec.SnapshotVolumes) { - p.log.Infof( - "Volume snapshotting not requested for backup %s/%s", - backup.Namespace, backup.Name) - return false - } - if backup.Status.Phase == velerov1api.BackupPhaseFinalizing || backup.Status.Phase == velerov1api.BackupPhaseFinalizingPartiallyFailed { p.log.WithFields( diff --git a/pkg/backup/actions/csi/pvc_action_test.go b/pkg/backup/actions/csi/pvc_action_test.go index aeda894dc20..3819e2950d4 100644 --- a/pkg/backup/actions/csi/pvc_action_test.go +++ b/pkg/backup/actions/csi/pvc_action_test.go @@ -62,11 +62,6 @@ func TestExecute(t *testing.T) { expectedDataUpload *velerov2alpha1.DataUpload expectedPVC *corev1.PersistentVolumeClaim }{ - { - name: "Skip PVC handling if SnapshotVolume set to false", - backup: builder.ForBackup("velero", "test").SnapshotVolumes(false).Result(), - expectedErr: nil, - }, { name: "Skip PVC BIA when backup is in finalizing phase", backup: 
builder.ForBackup("velero", "test").Phase(velerov1api.BackupPhaseFinalizing).Result(), diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 4e884119130..4f60d6ebdd8 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -39,7 +39,9 @@ import ( kbclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/internal/hook" + "github.com/vmware-tanzu/velero/internal/resourcepolicies" "github.com/vmware-tanzu/velero/internal/volume" + "github.com/vmware-tanzu/velero/internal/volumehelper" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" "github.com/vmware-tanzu/velero/pkg/client" @@ -321,6 +323,11 @@ func (kb *kubernetesBackupper) BackupWithResolvers( } backupRequest.Status.Progress = &velerov1api.BackupProgress{TotalItems: len(items)} + var resourcePolicy *resourcepolicies.Policies + if backupRequest.ResPolicies != nil { + resourcePolicy = backupRequest.ResPolicies + } + itemBackupper := &itemBackupper{ backupRequest: backupRequest, tarWriter: tw, @@ -334,6 +341,14 @@ func (kb *kubernetesBackupper) BackupWithResolvers( PodCommandExecutor: kb.podCommandExecutor, }, hookTracker: hook.NewHookTracker(), + volumeHelperImpl: volumehelper.NewVolumeHelperImpl( + resourcePolicy, + backupRequest.Spec.SnapshotVolumes, + log, + kb.kbClient, + boolptr.IsSetToTrue(backupRequest.Spec.DefaultVolumesToFsBackup), + !backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()), + ), } // helper struct to send current progress between the main diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 59d00bd77fe..77c3c4e1c9c 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -1335,10 +1335,11 @@ func (a *recordResourcesAction) WithSkippedCSISnapshotFlag(flag bool) *recordRes // verifies that the data in SkippedPVTracker is updated as expected. 
func TestBackupItemActionsForSkippedPV(t *testing.T) { tests := []struct { - name string - backupReq *Request - apiResources []*test.APIResource - actions []*recordResourcesAction + name string + backupReq *Request + apiResources []*test.APIResource + runtimeResources []runtime.Object + actions []*recordResourcesAction // {pvName:{approach: reason}} expectSkippedPVs map[string]map[string]string expectNotSkippedPVs []string @@ -1350,10 +1351,17 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) { SkippedPVTracker: NewSkipPVTracker(), }, apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), test.PVCs( - builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Result(), + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1.ClaimBound).Result(), ), }, + runtimeResources: []runtime.Object{ + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1.ClaimBound).Result(), + }, actions: []*recordResourcesAction{ new(recordResourcesAction).WithName(csiBIAPluginName).ForNamespace("ns-1").ForResource("persistentvolumeclaims").WithSkippedCSISnapshotFlag(true), }, @@ -1379,9 +1387,12 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) { }, apiResources: []*test.APIResource{ test.PVCs( - builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Result(), + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1.ClaimBound).Result(), ), }, + runtimeResources: []runtime.Object{ + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Phase(corev1.ClaimBound).Result(), + }, actions: []*recordResourcesAction{ new(recordResourcesAction).ForNamespace("ns-1").ForResource("persistentvolumeclaims").WithName(csiBIAPluginName), }, @@ -1399,7 +1410,9 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) { var ( h = newHarness(t) backupFile = bytes.NewBuffer([]byte{}) + 
fakeClient = test.NewFakeControllerRuntimeClient(t, tc.runtimeResources...) ) + h.backupper.kbClient = fakeClient for _, resource := range tc.apiResources { h.addItems(t, resource) diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index 1e35ea38568..c6631cb2435 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -75,6 +75,7 @@ type itemBackupper struct { itemHookHandler hook.ItemHookHandler snapshotLocationVolumeSnapshotters map[string]vsv1.VolumeSnapshotter hookTracker *hook.HookTracker + volumeHelperImpl volumehelper.VolumeHelper } type FileForArchive struct { @@ -188,16 +189,6 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti ib.trackSkippedPV(obj, groupResource, podVolumeApproach, fmt.Sprintf("opted out due to annotation in pod %s", podName), log) } - // Instantiate volumepolicyhelper struct here - vh := &volumehelper.VolumeHelperImpl{ - SnapshotVolumes: ib.backupRequest.Spec.SnapshotVolumes, - Logger: logger, - } - - if ib.backupRequest.ResPolicies != nil { - vh.VolumePolicy = ib.backupRequest.ResPolicies - } - if groupResource == kuberesource.Pods { // pod needs to be initialized for the unstructured converter pod = new(corev1api.Pod) @@ -209,7 +200,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti // Get the list of volumes to back up using pod volume backup from the pod's annotations or volume policy approach. Remove from this list // any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario, // where it's been backed up from another pod), since we don't need >1 backup per PVC. 
- includedVolumes, optedOutVolumes, err := vh.GetVolumesForFSBackup(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToFsBackup), !ib.backupRequest.ResourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()), ib.kbClient) + includedVolumes, optedOutVolumes, err := ib.volumeHelperImpl.GetVolumesForFSBackup(pod) if err != nil { backupErrs = append(backupErrs, errors.WithStack(err)) } @@ -239,7 +230,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti // the group version of the object. versionPath := resourceVersion(obj) - updatedObj, additionalItemFiles, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata, finalize, vh) + updatedObj, additionalItemFiles, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata, finalize) if err != nil { backupErrs = append(backupErrs, err) @@ -265,7 +256,7 @@ func (ib *itemBackupper) backupItemInternal(logger logrus.FieldLogger, obj runti backupErrs = append(backupErrs, err) } - if err := ib.takePVSnapshot(obj, log, vh); err != nil { + if err := ib.takePVSnapshot(obj, log); err != nil { backupErrs = append(backupErrs, err) } } @@ -361,7 +352,6 @@ func (ib *itemBackupper) executeActions( name, namespace string, metadata metav1.Object, finalize bool, - vh *volumehelper.VolumeHelperImpl, ) (runtime.Unstructured, []FileForArchive, error) { var itemFiles []FileForArchive for _, action := range ib.backupRequest.ResolvedActions { @@ -385,32 +375,45 @@ func (ib *itemBackupper) executeActions( continue } - if groupResource == kuberesource.PersistentVolumeClaims && actionName == csiBIAPluginName && vh.VolumePolicy != nil { - snapshotVolume, err := vh.ShouldPerformSnapshot(obj, kuberesource.PersistentVolumeClaims, ib.kbClient) - if err != nil { - return nil, itemFiles, errors.WithStack(err) + if groupResource == kuberesource.PersistentVolumeClaims { + if actionName == csiBIAPluginName { + snapshotVolume, err := 
ib.volumeHelperImpl.ShouldPerformSnapshot(obj, kuberesource.PersistentVolumeClaims) + if err != nil { + return nil, itemFiles, errors.WithStack(err) + } + + if !snapshotVolume { + log.Info(fmt.Sprintf("skipping csi volume snapshot for PVC %s as it does not fit the volume policy criteria specified by the user for snapshot action", namespace+"/"+name)) + ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, volumeSnapshotApproach, "does not satisfy the criteria for volume policy based snapshot action", log) + continue + } } - if !snapshotVolume { - log.Info(fmt.Sprintf("skipping csi volume snapshot for PVC %s as it does not fit the volume policy criteria specified by the user for snapshot action", namespace+"/"+name)) - ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, volumeSnapshotApproach, "does not satisfy the criteria for volume policy based snapshot action", log) + // TODO: need to communicate with @yangxi, if the vSphere plugin also uses the VolumePolicy, + // we can remove this logic, and align vSphere plugin with CSI plugin. 
+ if actionName == vsphereBIAPluginName && + boolptr.IsSetToFalse(ib.backupRequest.Spec.SnapshotVolumes) { + log.Info(fmt.Sprintf("skipping vSphere snapshot for PVC %s as backup's SnapshotVolumes is set to false", namespace+"/"+name)) + ib.trackSkippedPV(obj, kuberesource.PersistentVolumeClaims, volumeSnapshotApproach, "backup SnapshotVolumes set to false for vSphere snapshot", log) continue } + // the snapshot has been taken by the BIA plugin + ib.unTrackSkippedPV(obj, kuberesource.PersistentVolumeClaims, log) } updatedItem, additionalItemIdentifiers, operationID, postOperationItems, err := action.Execute(obj, ib.backupRequest.Backup) if err != nil { return nil, itemFiles, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name) } + u := &unstructured.Unstructured{Object: updatedItem.UnstructuredContent()} if actionName == csiBIAPluginName && additionalItemIdentifiers == nil && u.GetAnnotations()[velerov1api.SkippedNoCSIPVAnnotation] == "true" { // snapshot was skipped by CSI plugin + log.Infof("skip CSI snapshot for PVC %s as it's not a CSI compatible volume", namespace+"/"+name) ib.trackSkippedPV(obj, groupResource, csiSnapshotApproach, "skipped b/c it's not a CSI volume", log) delete(u.GetAnnotations(), velerov1api.SkippedNoCSIPVAnnotation) - } else if (actionName == csiBIAPluginName || actionName == vsphereBIAPluginName) && !boolptr.IsSetToFalse(ib.backupRequest.Backup.Spec.SnapshotVolumes) { - // the snapshot has been taken by the BIA plugin - ib.unTrackSkippedPV(obj, groupResource, log) } + mustInclude := u.GetAnnotations()[velerov1api.MustIncludeAdditionalItemAnnotation] == "true" || finalize // remove the annotation as it's for communication between BIA and velero server, // we don't want the resource be restored with this annotation.
@@ -528,7 +531,7 @@ const ( // takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided // backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud // disk type and IOPS (if applicable) to be able to restore to current state later. -func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger, vh *volumehelper.VolumeHelperImpl) error { +func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.FieldLogger) error { log.Info("Executing takePVSnapshot") pv := new(corev1api.PersistentVolume) @@ -538,23 +541,14 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie log = log.WithField("persistentVolume", pv.Name) - if vh.VolumePolicy != nil { - snapshotVolume, err := vh.ShouldPerformSnapshot(obj, kuberesource.PersistentVolumes, ib.kbClient) - - if err != nil { - return err - } - - if !snapshotVolume { - log.Info(fmt.Sprintf("skipping volume snapshot for PV %s as it does not fit the volume policy criteria specified by the user for snapshot action", pv.Name)) - ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "does not satisfy the criteria for volume policy based snapshot action", log) - return nil - } + snapshotVolume, err := ib.volumeHelperImpl.ShouldPerformSnapshot(obj, kuberesource.PersistentVolumes) + if err != nil { + return err } - if boolptr.IsSetToFalse(ib.backupRequest.Spec.SnapshotVolumes) { - log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.") - ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "backup has volume snapshots disabled", log) + if !snapshotVolume { + log.Info(fmt.Sprintf("skipping volume snapshot for PV %s as it does not fit the volume policy criteria specified by the user for snapshot action", pv.Name)) + ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "does not satisfy the 
criteria for volume policy based snapshot action", log) return nil } diff --git a/pkg/restore/pv_restorer.go b/pkg/restore/pv_restorer.go index 22cb8f09a0b..49d2e885fbd 100644 --- a/pkg/restore/pv_restorer.go +++ b/pkg/restore/pv_restorer.go @@ -38,7 +38,6 @@ type PVRestorer interface { type pvRestorer struct { logger logrus.FieldLogger backup *api.Backup - snapshotVolumes *bool restorePVs *bool volumeSnapshots []*volume.Snapshot volumeSnapshotterGetter VolumeSnapshotterGetter @@ -53,11 +52,6 @@ func (r *pvRestorer) executePVAction(obj *unstructured.Unstructured) (*unstructu return nil, errors.New("PersistentVolume is missing its name") } - if boolptr.IsSetToFalse(r.snapshotVolumes) { - // The backup had snapshots disabled, so we can return early - return obj, nil - } - if boolptr.IsSetToFalse(r.restorePVs) { // The restore has pv restores disabled, so we can return early return obj, nil diff --git a/pkg/restore/pv_restorer_test.go b/pkg/restore/pv_restorer_test.go index 98cadd88f22..af7729b54fd 100644 --- a/pkg/restore/pv_restorer_test.go +++ b/pkg/restore/pv_restorer_test.go @@ -125,7 +125,6 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { } if tc.backup != nil { r.backup = tc.backup - r.snapshotVolumes = tc.backup.Spec.SnapshotVolumes } for _, loc := range tc.locations { diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 5e7d5a0a4d6..63c26538bac 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -270,7 +270,6 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( pvRestorer := &pvRestorer{ logger: req.Log, backup: req.Backup, - snapshotVolumes: req.Backup.Spec.SnapshotVolumes, restorePVs: req.Restore.Spec.RestorePVs, volumeSnapshots: req.VolumeSnapshots, volumeSnapshotterGetter: volumeSnapshotterGetter,