From fdafb1eaed3f211f2d1eddcc3be53b469ef03d71 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 5 Mar 2018 18:14:19 -0500 Subject: [PATCH] UPSTREAM: : Revert "Merge pull request #18554 from rootfs/pr-58177" This reverts commit 8fd1640932422c45396b7103cf9727df0210eb9d, reversing changes made to 3522daf765c7ff82066b434bf7be035912ed9164. --- .../kubernetes/pkg/kubelet/config/config.go | 2 +- .../cache/actual_state_of_world.go | 46 +--- .../cache/actual_state_of_world_test.go | 36 +-- .../cache/desired_state_of_world.go | 21 +- .../cache/desired_state_of_world_test.go | 29 --- .../desired_state_of_world_populator.go | 1 - .../volumemanager/reconciler/reconciler.go | 218 +++++++++--------- .../reconciler/reconciler_test.go | 7 +- .../k8s.io/kubernetes/pkg/util/mount/mount.go | 19 -- .../operationexecutor/operation_executor.go | 50 ++-- .../operationexecutor/operation_generator.go | 54 +++-- 11 files changed, 182 insertions(+), 301 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go index c03e32512afd..d131c6ce3950 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go @@ -96,7 +96,7 @@ func (c *PodConfig) SeenAllSources(seenSources sets.String) bool { if c.pods == nil { return false } - glog.V(5).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) + glog.V(6).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 9243ef3329b6..c22049e7bcaf 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -73,7 +73,7 @@ type ActualStateOfWorld interface { // must unmounted prior to detach. // If a volume with the name volumeName does not exist in the list of // attached volumes, an error is returned. - SetVolumeGloballyMounted(volumeName v1.UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error + SetVolumeGloballyMounted(volumeName v1.UniqueVolumeName, globallyMounted bool) error // DeletePodFromVolume removes the given pod from the given volume in the // cache indicating the volume has been successfully unmounted from the pod. @@ -109,13 +109,6 @@ type ActualStateOfWorld interface { // volumes that do not need to update contents should not fail. PodExistsInVolume(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (bool, string, error) - // VolumeExistsWithSpecName returns true if the given volume specified with the - // volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of - // volumes that should be attached to this node. - // If a pod with the same name does not exist under the specified - // volume, false is returned. - VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool - // VolumeExists returns true if the given volume exists in the list of // attached volumes in the cache, indicating the volume is attached to this // node. 
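To make the interface change above concrete: after this revert, global-mount state is tracked as a plain boolean on the attached volume, and no devicePath or deviceMountPath is recorded at mount time. A minimal, self-contained sketch of that shape, using toy types rather than the kubelet's real cache:

package main

import (
	"fmt"
	"sync"
)

// UniqueVolumeName stands in for v1.UniqueVolumeName.
type UniqueVolumeName string

type attachedVolume struct {
	devicePath      string
	globallyMounted bool
}

// actualState is a toy stand-in for the kubelet's actualStateOfWorld cache.
type actualState struct {
	sync.RWMutex
	attachedVolumes map[UniqueVolumeName]attachedVolume
}

// SetVolumeGloballyMounted mirrors the boolean-only signature this patch
// restores: no devicePath/deviceMountPath bookkeeping at mount time.
func (s *actualState) SetVolumeGloballyMounted(name UniqueVolumeName, globallyMounted bool) error {
	s.Lock()
	defer s.Unlock()
	vol, ok := s.attachedVolumes[name]
	if !ok {
		return fmt.Errorf("no volume with the name %q exists in the list of attached volumes", name)
	}
	vol.globallyMounted = globallyMounted
	s.attachedVolumes[name] = vol
	return nil
}

func (s *actualState) MarkDeviceAsMounted(name UniqueVolumeName) error {
	return s.SetVolumeGloballyMounted(name, true /* globallyMounted */)
}

func (s *actualState) MarkDeviceAsUnmounted(name UniqueVolumeName) error {
	return s.SetVolumeGloballyMounted(name, false /* globallyMounted */)
}

func main() {
	s := &actualState{attachedVolumes: map[UniqueVolumeName]attachedVolume{
		"fake-plugin/vol1": {devicePath: "fake/device/path"},
	}}
	if err := s.MarkDeviceAsMounted("fake-plugin/vol1"); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println("globallyMounted:", s.attachedVolumes["fake-plugin/vol1"].globallyMounted)
}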
@@ -247,10 +240,6 @@ type attachedVolume struct { // devicePath contains the path on the node where the volume is attached for // attachable volumes devicePath string - - // deviceMountPath contains the path on the node where the device should - // be mounted after it is attached. - deviceMountPath string } // The mountedPod object represents a pod for which the kubelet volume manager @@ -329,13 +318,13 @@ func (asw *actualStateOfWorld) MarkVolumeAsUnmounted( } func (asw *actualStateOfWorld) MarkDeviceAsMounted( - volumeName v1.UniqueVolumeName, devicePath, deviceMountPath string) error { - return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */, devicePath, deviceMountPath) + volumeName v1.UniqueVolumeName) error { + return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */) } func (asw *actualStateOfWorld) MarkDeviceAsUnmounted( volumeName v1.UniqueVolumeName) error { - return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */, "", "") + return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */) } // addVolume adds the given volume to the cache indicating the specified @@ -465,7 +454,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( } func (asw *actualStateOfWorld) SetVolumeGloballyMounted( - volumeName v1.UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error { + volumeName v1.UniqueVolumeName, globallyMounted bool) error { asw.Lock() defer asw.Unlock() @@ -477,8 +466,6 @@ func (asw *actualStateOfWorld) SetVolumeGloballyMounted( } volumeObj.globallyMounted = globallyMounted - volumeObj.deviceMountPath = deviceMountPath - volumeObj.devicePath = devicePath asw.attachedVolumes[volumeName] = volumeObj return nil } @@ -542,19 +529,6 @@ func (asw *actualStateOfWorld) PodExistsInVolume( return podExists, volumeObj.devicePath, nil } -func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.UniquePodName, volumeSpecName string) bool { - asw.RLock() - defer asw.RUnlock() - for _, volumeObj := range asw.attachedVolumes { - for name := range volumeObj.mountedPods { - if podName == name && volumeObj.spec.Name() == volumeSpecName { - return true - } - } - } - return false -} - func (asw *actualStateOfWorld) VolumeExists( volumeName v1.UniqueVolumeName) bool { asw.RLock() @@ -651,11 +625,8 @@ func (asw *actualStateOfWorld) newAttachedVolume( VolumeSpec: attachedVolume.spec, NodeName: asw.nodeName, PluginIsAttachable: attachedVolume.pluginIsAttachable, - DevicePath: attachedVolume.devicePath, - DeviceMountPath: attachedVolume.deviceMountPath, - PluginName: attachedVolume.pluginName}, - GloballyMounted: attachedVolume.globallyMounted, - } + DevicePath: attachedVolume.devicePath}, + GloballyMounted: attachedVolume.globallyMounted} } // Compile-time check to ensure volumeNotAttachedError implements the error interface @@ -720,6 +691,5 @@ func getMountedVolume( Mounter: mountedPod.mounter, BlockVolumeMapper: mountedPod.blockVolumeMapper, VolumeGidValue: mountedPod.volumeGidValue, - VolumeSpec: attachedVolume.spec, - DeviceMountPath: attachedVolume.deviceMountPath}} + VolumeSpec: attachedVolume.spec}} } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index e408c5d270b0..e2c51812d44f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -222,7 +222,6 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) { verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName, asw) verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw) verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, "fake/device/path" /* expectedDevicePath */, asw) - verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw) } // Populates data struct with a volume @@ -293,7 +292,6 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName, asw) verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName, asw) verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, "fake/device/path" /* expectedDevicePath */, asw) - verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw) } // Calls AddPodToVolume() to add pod to empty data stuct @@ -372,7 +370,6 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) { volumeName, false, /* expectVolumeToExist */ asw) - verifyVolumeDoesntExistWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw) } // Calls MarkVolumeAsAttached() once to add volume @@ -403,7 +400,6 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} devicePath := "fake/device/path" - deviceMountPath := "fake/device/mount/path" generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) @@ -412,7 +408,7 @@ } // Act - err = asw.MarkDeviceAsMounted(generatedVolumeName, devicePath, deviceMountPath) + err = asw.MarkDeviceAsMounted(generatedVolumeName) // Assert if err != nil { @@ -550,33 +546,3 @@ func verifyPodDoesntExistInVolumeAsw( devicePath) } } - -func verifyVolumeExistsWithSpecNameInVolumeAsw( - t *testing.T, - expectedPodName volumetypes.UniquePodName, - expectedVolumeName string, - asw ActualStateOfWorld) { - podExistsInVolume := - asw.VolumeExistsWithSpecName(expectedPodName, expectedVolumeName) - - if !podExistsInVolume { - t.Fatalf( - "ASW VolumeExistsWithSpecName result invalid. Expected: <true> Actual: <%v>", - podExistsInVolume) - } -} - -func verifyVolumeDoesntExistWithSpecNameInVolumeAsw( - t *testing.T, - podToCheck volumetypes.UniquePodName, - volumeToCheck string, - asw ActualStateOfWorld) { - podExistsInVolume := - asw.VolumeExistsWithSpecName(podToCheck, volumeToCheck) - - if podExistsInVolume { - t.Fatalf( - "ASW VolumeExistsWithSpecName result invalid. Expected: <false> Actual: <%v>", - podExistsInVolume) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index 812c885939ae..e26131335c54 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -98,13 +98,6 @@ type DesiredStateOfWorld interface { // with pod's unique name. This map can be used to determine which pod is currently // in desired state of world.
GetPods() map[types.UniquePodName]bool - - // VolumeExistsWithSpecName returns true if the given volume specified with the - // volume spec name (a.k.a., InnerVolumeSpecName) exists in the list of - // volumes that should be attached to this node. - // If a pod with the same name does not exist under the specified - // volume, false is returned. - VolumeExistsWithSpecName(podName types.UniquePodName, volumeSpecName string) bool } // VolumeToMount represents a volume that is attached to this node and needs to @@ -241,6 +234,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( spec: volumeSpec, outerVolumeSpecName: outerVolumeSpecName, } + return volumeName, nil } @@ -309,19 +303,6 @@ func (dsw *desiredStateOfWorld) PodExistsInVolume( return podExists } -func (dsw *desiredStateOfWorld) VolumeExistsWithSpecName(podName types.UniquePodName, volumeSpecName string) bool { - dsw.RLock() - defer dsw.RUnlock() - for _, volumeObj := range dsw.volumesToMount { - for name, podObj := range volumeObj.podsToMount { - if podName == name && podObj.spec.Name() == volumeSpecName { - return true - } - } - } - return false -} - func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool { dsw.RLock() defer dsw.RUnlock() diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go index 849a607fecae..506f7f9ad313 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go @@ -69,7 +69,6 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) { verifyVolumeExistsInVolumesToMount( t, generatedVolumeName, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw) - verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) } // Calls AddPodToVolume() twice to add the same pod to the same volume @@ -114,7 +113,6 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) { verifyVolumeExistsInVolumesToMount( t, generatedVolumeName, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw) - verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) } // Populates data struct with a new volume/pod @@ -162,7 +160,6 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) { verifyVolumeDoesntExist(t, generatedVolumeName, dsw) verifyVolumeDoesntExistInVolumesToMount(t, generatedVolumeName, dsw) verifyPodDoesntExistInVolumeDsw(t, podName, generatedVolumeName, dsw) - verifyVolumeDoesntExistWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) } // Calls AddPodToVolume() to add three new volumes to data struct @@ -383,29 +380,3 @@ func verifyPodDoesntExistInVolumeDsw( podExistsInVolume) } } - -func verifyVolumeExistsWithSpecNameInVolumeDsw( - t *testing.T, - expectedPodName volumetypes.UniquePodName, - expectedVolumeSpecName string, - dsw DesiredStateOfWorld) { - if podExistsInVolume := dsw.VolumeExistsWithSpecName( - expectedPodName, expectedVolumeSpecName); !podExistsInVolume { - t.Fatalf( - "DSW VolumeExistsWithSpecNam returned incorrect value. 
Expected: <true> Actual: <%v>", - podExistsInVolume) - } -} - -func verifyVolumeDoesntExistWithSpecNameInVolumeDsw( - t *testing.T, - expectedPodName volumetypes.UniquePodName, - expectedVolumeSpecName string, - dsw DesiredStateOfWorld) { - if podExistsInVolume := dsw.VolumeExistsWithSpecName( - expectedPodName, expectedVolumeSpecName); podExistsInVolume { - t.Fatalf( - "DSW VolumeExistsWithSpecNam returned incorrect value. Expected: <false> Actual: <%v>", - podExistsInVolume) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 49d957dd5473..9a2685f4d123 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -127,7 +127,6 @@ type processedPods struct { func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) { // Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly - glog.Infof("Desired state populator starts to run") wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) { done := sourcesReady.AllReady() dswp.populatorLoopFunc()() diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go index 8815cfb12a15..8865a0043a04 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -140,18 +140,26 @@ type reconciler struct { } func (rc *reconciler) Run(stopCh <-chan struct{}) { - wait.Until(rc.reconciliationLoopFunc(), rc.loopSleepDuration, stopCh) + // Wait for the populator to indicate that it has actually populated the desired state of world, meaning it has + // completed a populate loop that started after sources are all ready. After, there's no need to keep checking. + wait.PollUntil(rc.loopSleepDuration, func() (bool, error) { + rc.reconciliationLoopFunc(rc.populatorHasAddedPods())() + return rc.populatorHasAddedPods(), nil + }, stopCh) + wait.Until(rc.reconciliationLoopFunc(true), rc.loopSleepDuration, stopCh) } -func (rc *reconciler) reconciliationLoopFunc() func() { +func (rc *reconciler) reconciliationLoopFunc(populatorHasAddedPods bool) func() { return func() { rc.reconcile() - // Sync the state with the reality once after all existing pods are added to the desired state from all sources. - // Otherwise, the reconstruct process may clean up pods' volumes that are still in use because - // desired state of world does not contain a complete list of pods. - if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() { - glog.Infof("Reconciler: start to sync state") + // Add a check that the populator has added pods so that reconciler's reconstruct process will start + // after desired state of world is populated with pod volume information. Otherwise, reconciler's + // reconstruct process may add incomplete volume information and cause confusion. In addition, if the + // desired state of world has not been populated yet, the reconstruct process may clean up pods' volumes + // that are still in use because desired state of world does not contain a complete list of pods.
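The restored Run above is a two-phase loop: poll, reconciling on every tick, until the populator reports it has added all pods, then settle into a plain periodic loop with populatorHasAddedPods fixed to true. A dependency-free sketch of that gating pattern, with illustrative stand-ins for the wait helpers and the reconciler:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var populated atomic.Bool

	// Stand-in for the populator flipping hasAddedPods once a full populate
	// loop has run after all pod sources are ready.
	go func() {
		time.Sleep(30 * time.Millisecond)
		populated.Store(true)
	}()

	reconcile := func(populatorHasAddedPods bool) {
		fmt.Println("reconcile; populatorHasAddedPods =", populatorHasAddedPods)
	}

	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	// Phase 1: like wait.PollUntil above, reconcile on each tick and stop
	// polling once the populator has added pods.
	for !populated.Load() {
		<-ticker.C
		reconcile(populated.Load())
	}

	// Phase 2: like wait.Until above, keep reconciling with the flag fixed
	// to true (three ticks here for the demo).
	for i := 0; i < 3; i++ {
		<-ticker.C
		reconcile(true)
	}
}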
+ if populatorHasAddedPods && time.Since(rc.timeOfLastSync) > rc.syncDuration { + glog.V(5).Infof("Desired state of world has been populated with pods, starting reconstruct state function") rc.sync() } } @@ -311,11 +319,11 @@ func (rc *reconciler) reconcile() { // sync process tries to observe the real world by scanning all pods' volume directories from the disk. // If the actual and desired state of worlds are not consistent with the observed world, it means that some // mounted volumes are left out probably during kubelet restart. This process will reconstruct -// the volumes and udpate the actual and desired states. For the volumes that cannot support reconstruction, -// it will try to clean up the mount paths with operation executor. +// the volumes and update the actual and desired states. In the following reconciler loop, those volumes will +// be cleaned up. func (rc *reconciler) sync() { defer rc.updateLastSyncTime() - rc.syncStates() + rc.syncStates(rc.kubeletPodsDir) } func (rc *reconciler) updateLastSyncTime() { @@ -340,7 +348,7 @@ type reconstructedVolume struct { volumeSpec *volumepkg.Spec outerVolumeSpecName string pod *v1.Pod - attachablePlugin volumepkg.AttachableVolumePlugin + pluginIsAttachable bool volumeGidValue string devicePath string reportedInUse bool @@ -348,41 +356,66 @@ type reconstructedVolume struct { blockVolumeMapper volumepkg.BlockVolumeMapper } -// syncStates scans the volume directories under the given pod directory. -// If the volume is not in desired state of world, this function will reconstruct -// the volume related information and put it in both the actual and desired state of worlds. -// For some volume plugins that cannot support reconstruction, it will clean up the existing -// mount points since the volume is no long needed (removed from desired state) -func (rc *reconciler) syncStates() { +// syncStates scans the volume directories under the given pod directory. If the volume is not +// in either actual or desired state of world, or pending operation, this function will reconstruct +// the volume spec and put it in both the actual and desired state of worlds. If no running +// container is mounting the volume, the volume will be removed by desired state of world's populator and +// cleaned up by the reconciler. +func (rc *reconciler) syncStates(podsDir string) { // Get volumes information by reading the pod's directory - podVolumes, err := getVolumesFromPodDir(rc.kubeletPodsDir) + podVolumes, err := getVolumesFromPodDir(podsDir) if err != nil { glog.Errorf("Cannot get volumes from disk %v", err) return } + volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume) for _, volume := range podVolumes { - if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) { - glog.V(4).Info("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) - continue - } - if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) { - glog.V(4).Info("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) - continue - } reconstructedVolume, err := rc.reconstructVolume(volume) if err != nil { - glog.Warning("Could not construct volume information, cleanup the mounts.
(pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err) - rc.cleanupMounts(volume) + glog.Errorf("Could not construct volume information: %v", err) continue } - if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) { - glog.Warning("Volume is in pending operation, skip cleaning up mounts") + // Check if there is a pending operation for the given pod and volume. + // Need to check pending operation before checking the actual and desired + // states to avoid a race condition during checking. For example, the following + // might happen if pending operation is checked after checking actual and desired states. + // 1. Checking the pod and it does not exist in either actual or desired state. + // 2. An operation for the given pod finishes and the actual state is updated. + // 3. Checking and there is no pending operation for the given pod. + // During the state reconstruction period, no new volume operations could be issued. If the + // mounted path is not in either pending operation, or actual or desired states, this + // volume needs to be reconstructed back to the states. + pending := rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, reconstructedVolume.podName) + dswExist := rc.desiredStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName) + aswExist, _, _ := rc.actualStateOfWorld.PodExistsInVolume(reconstructedVolume.podName, reconstructedVolume.volumeName) + + if !rc.StatesHasBeenSynced() { + // If this is the first time state is reconstructed after kubelet starts, a persistent volume must have + // been mounted before the kubelet restarted, because no mount operations could be started at this time (node + // status has not yet been updated before this very first syncStates finishes, so VerifyControllerAttachedVolume would fail). + // In this case, the volume state should be put back into actual state now, whether or not desired state has it. + // This is to prevent node status from being updated to empty for attachable volumes. That could happen when + // a volume is discovered on disk and is part of desired state, but is then quickly deleted + // from the desired state. If, in such a situation, the volume is not added to the actual state, the node status updater will + // not get this volume from either actual or desired state. In turn, this might cause the master controller + // to start detaching while the volume is still mounted. + if aswExist || !reconstructedVolume.pluginIsAttachable { + continue + } + } else { + // Check pending first since no new operations could be started at this point.
+ // Otherwise there might be a race condition in checking actual states and pending operations + if pending || dswExist || aswExist { + continue + } } glog.V(2).Infof( - "Reconciler sync states: could not find pod information in desired state, update it in actual state: %+v", + "Reconciler sync states: could not find pod information in desired or actual states or pending operation, update it in both states: %+v", reconstructedVolume) volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume + } if len(volumesNeedUpdate) > 0 { @@ -393,31 +426,7 @@ } -func (rc *reconciler) cleanupMounts(volume podVolume) { - glog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points", - volume.podName, volume.volumeSpecName) - mountedVolume := operationexecutor.MountedVolume{ - PodName: volume.podName, - VolumeName: v1.UniqueVolumeName(volume.volumeSpecName), - InnerVolumeSpecName: volume.volumeSpecName, - PluginName: volume.pluginName, - PodUID: types.UID(volume.podName), - } - volumeHandler, err := operationexecutor.NewVolumeHandlerWithMode(volume.volumeMode, rc.operationExecutor) - if err != nil { - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountVolume failed"), err).Error()) - return - } - // TODO: Currently cleanupMounts only includes UnmountVolume operation. In the next PR, we will add - // to unmount both volume and device in the same routine. - err = volumeHandler.UnmountVolumeHandler(mountedVolume, rc.actualStateOfWorld) - if err != nil { - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error()) - return - } -} - -// Reconstruct volume data structure by reading the pod's volume directories +// Reconstruct Volume object and reconstructedVolume data structure by reading the pod's volume directories func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, error) { // plugin initializations plugin, err := rc.volumePluginMgr.FindPluginByName(volume.pluginName) @@ -429,17 +438,23 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, return nil, err } - // Create pod object + // Create volumeSpec pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: types.UID(volume.podName), }, } - volumeHandler, err := operationexecutor.NewVolumeHandlerWithMode(volume.volumeMode, rc.operationExecutor) - if err != nil { - return nil, err + // TODO: remove feature gate check after no longer needed + var mapperPlugin volumepkg.BlockVolumePlugin + tmpSpec := &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}} + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + mapperPlugin, err = rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName) + if err != nil { + return nil, err + } + tmpSpec = &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volume.volumeMode}}} } - mapperPlugin, err := rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName) + volumeHandler, err := operationexecutor.NewVolumeHandler(tmpSpec, rc.operationExecutor) if err != nil { return nil, err } @@ -465,6 +480,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, } else { uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec) } + // Check
existence of mount point for filesystem volume or symbolic link for block volume isExist, checkErr := volumeHandler.CheckVolumeExistence(volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin) if checkErr != nil { @@ -491,7 +507,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, // TODO: remove feature gate check after no longer needed var volumeMapper volumepkg.BlockVolumeMapper - if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && volume.volumeMode == v1.PersistentVolumeBlock { + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { var newMapperErr error if mapperPlugin != nil { volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper( @@ -514,24 +530,23 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, volumeName: uniqueVolumeName, podName: volume.podName, volumeSpec: volumeSpec, - // volume.volumeSpecName is actually InnerVolumeSpecName. It will not be used + // volume.volumeSpecName is actually InnerVolumeSpecName. But this information will likely be updated in updateStates() + // by checking the desired state volumeToMount list and getting the real OuterVolumeSpecName. + // If the pod is deleted during this period and desired state does not have this information, it will not be used // for volume cleanup. - // TODO: in case pod is added back before reconciler starts to unmount, we can update this field from desired state information outerVolumeSpecName: volume.volumeSpecName, pod: pod, - attachablePlugin: attachablePlugin, + pluginIsAttachable: attachablePlugin != nil, volumeGidValue: "", - // devicePath is updated during updateStates() by checking node status's VolumesAttached data. - // TODO: get device path directly from the volume mount path. - devicePath: "", - mounter: volumeMounter, - blockVolumeMapper: volumeMapper, + devicePath: "", + mounter: volumeMounter, + blockVolumeMapper: volumeMapper, } return reconstructedVolume, nil } -// updateDevicePath gets the node status to retrieve volume device path information. -func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) { +func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error { + // Get the node status to retrieve volume device path information.
node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) @@ -544,42 +559,26 @@ func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName } } } -} - -func getDeviceMountPath(volume *reconstructedVolume) (string, error) { - volumeAttacher, err := volume.attachablePlugin.NewAttacher() - if volumeAttacher == nil || err != nil { - return "", err - } - deviceMountPath, err := - volumeAttacher.GetDeviceMountPath(volume.volumeSpec) - if err != nil { - return "", err - } - if volume.blockVolumeMapper != nil { - deviceMountPath, err = - volume.blockVolumeMapper.GetGlobalMapPath(volume.volumeSpec) - if err != nil { - return "", err + // Get the list of volumes from desired state and update OuterVolumeSpecName if the information is available + volumesToMount := rc.desiredStateOfWorld.GetVolumesToMount() + for _, volumeToMount := range volumesToMount { + if volume, exists := volumesNeedUpdate[volumeToMount.VolumeName]; exists { + volume.outerVolumeSpecName = volumeToMount.OuterVolumeSpecName + volumesNeedUpdate[volumeToMount.VolumeName] = volume + glog.V(4).Infof("Update OuterVolumeSpecName from desired state for volume (%q): %q", + volumeToMount.VolumeName, volume.outerVolumeSpecName) } } - return deviceMountPath, nil -} - -func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error { - // Get the node status to retrieve volume device path information. - rc.updateDevicePath(volumesNeedUpdate) - for _, volume := range volumesNeedUpdate { err := rc.actualStateOfWorld.MarkVolumeAsAttached( - //TODO: the devicePath might not be correct for some volume plugins: see issue #54108 volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath) if err != nil { glog.Errorf("Could not add volume information to actual state of world: %v", err) continue } - err = rc.actualStateOfWorld.MarkVolumeAsMounted( + + err = rc.actualStateOfWorld.AddPodToVolume( volume.podName, types.UID(volume.podName), volume.volumeName, @@ -591,19 +590,22 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re glog.Errorf("Could not add pod to volume information to actual state of world: %v", err) continue } - glog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName) - if volume.attachablePlugin != nil { - deviceMountPath, err := getDeviceMountPath(volume) - if err != nil { - glog.Errorf("Could not find device mount path for volume %s", volume.volumeName) - continue - } - err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName, volume.devicePath, deviceMountPath) + if volume.pluginIsAttachable { + err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName) if err != nil { glog.Errorf("Could not mark device is mounted to actual state of world: %v", err) continue } - glog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName) + glog.Infof("Volume: %v is mounted", volume.volumeName) + } + + _, err = rc.desiredStateOfWorld.AddPodToVolume(volume.podName, + volume.pod, + volume.volumeSpec, + volume.outerVolumeSpecName, + volume.volumeGidValue) + if err != nil { + glog.Errorf("Could not add pod to volume information to desired state of world: %v", err) } } return nil @@ -665,6 +667,6 @@ func getVolumesFromPodDir(podDir 
string) ([]podVolume, error) { } } } - glog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes) + glog.V(10).Infof("Get volumes from pod directory %q %+v", podDir, volumes) return volumes, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index 187171ebda9b..32cd954d2ec7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -933,8 +933,7 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) { false, /* checkNodeCapabilitiesBeforeMount */ nil)) var mounter mount.Interface - plugins := volumetesting.NewFakeFileVolumePlugin() - deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: &volume.Spec{}, PluginName: plugins[0].GetPluginName()} + deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: &volume.Spec{}} err := oex.UnmapDevice(deviceToDetach, asw, mounter) // Assert if assert.Error(t, err) { @@ -950,7 +949,7 @@ func waitForMount( volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) { err := retryWithExponentialBackOff( - time.Duration(500*time.Millisecond), + time.Duration(5*time.Millisecond), func() (bool, error) { mountedVolumes := asw.GetMountedVolumes() for _, mountedVolume := range mountedVolumes { @@ -974,7 +973,7 @@ func waitForDetach( volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) { err := retryWithExponentialBackOff( - time.Duration(500*time.Millisecond), + time.Duration(5*time.Millisecond), func() (bool, error) { if asw.VolumeExists(volumeName) { return false, nil diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go index c037d8fb1377..d2069ec7861b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go @@ -21,7 +21,6 @@ package mount import ( "os" "path/filepath" - "strings" ) type FileType string @@ -262,21 +261,3 @@ func isBind(options []string) (bool, []string) { return bind, bindRemountOpts } - -// TODO: this is a workaround for the unmount device issue caused by gci mounter. -// In GCI cluster, if gci mounter is used for mounting, the container started by mounter -// script will cause additional mounts created in the container. Since these mounts are -// irrelavant to the original mounts, they should be not considered when checking the -// mount references. Current solution is to filter out those mount paths that contain -// the string of original mount path. -// Plan to work on better approach to solve this issue. 
- -func HasMountRefs(mountPath string, mountRefs []string) bool { - count := 0 - for _, ref := range mountRefs { - if !strings.Contains(ref, mountPath) { - count = count + 1 - } - } - return count > 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index d17414a6aa12..7035802dd57f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -22,6 +22,7 @@ package operationexecutor import ( "fmt" + "strings" "time" "github.com/golang/glog" @@ -168,7 +169,7 @@ type ActualStateOfWorldMounterUpdater interface { MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error // Marks the specified volume as having been globally mounted. - MarkDeviceAsMounted(volumeName v1.UniqueVolumeName, devicePath, deviceMountPath string) error + MarkDeviceAsMounted(volumeName v1.UniqueVolumeName) error // Marks the specified volume as having its global mount unmounted. MarkDeviceAsUnmounted(volumeName v1.UniqueVolumeName) error @@ -385,14 +386,6 @@ type AttachedVolume struct { // DevicePath contains the path on the node where the volume is attached. // For non-attachable volumes this is empty. DevicePath string - - // DeviceMountPath contains the path on the node where the device should - // be mounted after it is attached. - DeviceMountPath string - - // PluginName is the Unescaped Qualified name of the volume plugin used to - // attach and mount this volume. - PluginName string } // GenerateMsgDetailed returns detailed msgs for attached volumes @@ -536,10 +529,6 @@ type MountedVolume struct { // VolumeSpec is a volume spec containing the specification for the volume // that should be mounted. VolumeSpec *volume.Spec - - // DeviceMountPath contains the path on the node where the device should - // be mounted after it is attached. - DeviceMountPath string } // GenerateMsgDetailed returns detailed msgs for mounted volumes @@ -877,21 +866,6 @@ func NewVolumeHandler(volumeSpec *volume.Spec, oe OperationExecutor) (VolumeStat return volumeHandler, nil } -// NewVolumeHandlerWithMode return a new instance of volumeHandler depens on a volumeMode -func NewVolumeHandlerWithMode(volumeMode v1.PersistentVolumeMode, oe OperationExecutor) (VolumeStateHandler, error) { - var volumeHandler VolumeStateHandler - if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - if volumeMode == v1.PersistentVolumeFilesystem { - volumeHandler = NewFilesystemVolumeHandler(oe) - } else { - volumeHandler = NewBlockVolumeHandler(oe) - } - } else { - volumeHandler = NewFilesystemVolumeHandler(oe) - } - return volumeHandler, nil -} - // NewFilesystemVolumeHandler returns a new instance of FilesystemVolumeHandler. 
func NewFilesystemVolumeHandler(operationExecutor OperationExecutor) FilesystemVolumeHandler { return FilesystemVolumeHandler{ @@ -950,7 +924,7 @@ func (f FilesystemVolumeHandler) UnmountDeviceHandler(attachedVolume AttachedVol // ReconstructVolumeHandler create volumeSpec from mount path // This method is handler for filesystem volume func (f FilesystemVolumeHandler) ReconstructVolumeHandler(plugin volume.VolumePlugin, _ volume.BlockVolumePlugin, _ types.UID, _ volumetypes.UniquePodName, volumeSpecName string, mountPath string, _ string) (*volume.Spec, error) { - glog.V(4).Infof("Starting operationExecutor.ReconstructVolumepodName volume spec name %s, mount path %s", volumeSpecName, mountPath) + glog.V(12).Infof("Starting operationExecutor.ReconstructVolumepodName") volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath) if err != nil { return nil, err @@ -1050,3 +1024,21 @@ func (b BlockVolumeHandler) CheckVolumeExistence(mountPath, volumeName string, m } return islinkExist, nil } + +// TODO: this is a workaround for the unmount device issue caused by gci mounter. +// In GCI cluster, if gci mounter is used for mounting, the container started by mounter +// script will cause additional mounts created in the container. Since these mounts are +// irrelevant to the original mounts, they should not be considered when checking the +// mount references. The current solution is to filter out those mount paths that contain +// the string of the original mount path. +// Plan to work on a better approach to solve this issue. + +func hasMountRefs(mountPath string, mountRefs []string) bool { + count := 0 + for _, ref := range mountRefs { + if !strings.Contains(ref, mountPath) { + count = count + 1 + } + } + return count > 0 +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index f2408b47fb10..7895dc93e92f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -509,7 +509,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( // Update actual state of world to reflect volume is globally mounted markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted( - volumeToMount.VolumeName, devicePath, deviceMountPath) + volumeToMount.VolumeName) if markDeviceMountedErr != nil { // On failure, return error. Caller will log and retry.
return volumeToMount.GenerateError("MountVolume.MarkDeviceAsMounted failed", markDeviceMountedErr) @@ -705,7 +705,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( mounter mount.Interface) (volumetypes.GeneratedOperations, error) { // Get attacher plugin attachableVolumePlugin, err := - og.volumePluginMgr.FindAttachablePluginByName(deviceToDetach.PluginName) + og.volumePluginMgr.FindAttachablePluginBySpec(deviceToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) } @@ -714,11 +714,22 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( if err != nil { return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) } + + volumeAttacher, err := attachableVolumePlugin.NewAttacher() + if err != nil { + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) + } + unmountDeviceFunc := func() (error, error) { - deviceMountPath := deviceToDetach.DeviceMountPath + deviceMountPath, err := + volumeAttacher.GetDeviceMountPath(deviceToDetach.VolumeSpec) + if err != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateError("GetDeviceMountPath failed", err) + } refs, err := attachableVolumePlugin.GetDeviceMountRefs(deviceMountPath) - if err != nil || mount.HasMountRefs(deviceMountPath, refs) { + if err != nil || hasMountRefs(deviceMountPath, refs) { if err == nil { err = fmt.Errorf("The device mount path %q is still mounted by other references %v", deviceMountPath, refs) } @@ -815,13 +826,6 @@ func (og *operationGenerator) GenerateMapVolumeFunc( mapVolumeFunc := func() (error, error) { var devicePath string - // Set up global map path under the given plugin directory using symbolic link - globalMapPath, err := - blockVolumeMapper.GetGlobalMapPath(volumeToMount.VolumeSpec) - if err != nil { - // On failure, return error. Caller will log and retry. - return volumeToMount.GenerateError("MapVolume.GetDeviceMountPath failed", err) - } if volumeAttacher != nil { // Wait for attachable volumes to finish attaching glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) @@ -837,7 +841,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( // Update actual state of world to reflect volume is globally mounted markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted( - volumeToMount.VolumeName, devicePath, globalMapPath) + volumeToMount.VolumeName) if markDeviceMappedErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateError("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) @@ -857,13 +861,18 @@ func (og *operationGenerator) GenerateMapVolumeFunc( return volumeToMount.GenerateError("MapVolume failed", fmt.Errorf("Device path of the volume is empty")) } } - + // Set up global map path under the given plugin directory using symbolic link + globalMapPath, err := + blockVolumeMapper.GetGlobalMapPath(volumeToMount.VolumeSpec) + if err != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateError("MapVolume.GetDeviceMountPath failed", err) + } mapErr = og.blkUtil.MapDevice(devicePath, globalMapPath, string(volumeToMount.Pod.UID)) if mapErr != nil { // On failure, return error. Caller will log and retry. 
return volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) } - // Device mapping for global map path succeeded simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) verbosity := glog.Level(4) @@ -958,7 +967,8 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( } // Try to unmap podUID symlink under global map path dir // plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID} - globalUnmapPath := volumeToUnmount.DeviceMountPath + globalUnmapPath, err := + blockVolumeUnmapper.GetGlobalMapPath(volumeToUnmount.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. return volumeToUnmount.GenerateError("UnmapVolume.GetGlobalUnmapPath failed", err) @@ -1012,14 +1022,23 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (volumetypes.GeneratedOperations, error) { + // Get block volume mapper plugin + var blockVolumeMapper volume.BlockVolumeMapper blockVolumePlugin, err := - og.volumePluginMgr.FindMapperPluginByName(deviceToDetach.PluginName) + og.volumePluginMgr.FindMapperPluginBySpec(deviceToDetach.VolumeSpec) if err != nil { return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed", err) } if blockVolumePlugin == nil { return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) } + blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper( + deviceToDetach.VolumeSpec, + nil, /* Pod */ + volume.VolumeOptions{}) + if newMapperErr != nil { + return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewBlockVolumeMapper initialization failed", newMapperErr) + } blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( string(deviceToDetach.VolumeName), @@ -1031,7 +1050,8 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( unmapDeviceFunc := func() (error, error) { // Search under globalMapPath dir if all symbolic links from pods have been removed already. // If symbolick links are there, pods may still refer the volume. - globalMapPath := deviceToDetach.DeviceMountPath + globalMapPath, err := + blockVolumeMapper.GetGlobalMapPath(deviceToDetach.VolumeSpec) if err != nil { // On failure, return error. Caller will log and retry. return deviceToDetach.GenerateError("UnmapDevice.GetGlobalMapPath failed", err)
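For the GCI-mounter workaround reintroduced in operation_executor.go above, here is hasMountRefs as restored by this patch, exercised on hypothetical paths: a reference created under the original mount path is ignored, while any unrelated reference keeps the device counted as still in use:

package main

import (
	"fmt"
	"strings"
)

// hasMountRefs as restored above: only references that do NOT contain the
// original mount path count, which filters out the extra mounts the GCI
// mounter container creates under the original path.
func hasMountRefs(mountPath string, mountRefs []string) bool {
	count := 0
	for _, ref := range mountRefs {
		if !strings.Contains(ref, mountPath) {
			count = count + 1
		}
	}
	return count > 0
}

func main() {
	// Hypothetical paths, for illustration only.
	mountPath := "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/disk1"
	refs := []string{
		mountPath + "/nested", // under the original path: ignored
		"/var/lib/kubelet/pods/uid1/volumes/kubernetes.io~gce-pd/disk1", // unrelated: counts
	}
	fmt.Println(hasMountRefs(mountPath, refs)) // true: a real reference remains
}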