From 4de0f79fa3106843d93698365c2da361ee0e06cb Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Mon, 9 Nov 2020 08:58:06 -0500
Subject: [PATCH] csi: clear alloc pointers from volume before writing to
 state store

---
 nomad/csi_endpoint_test.go |  4 ++++
 nomad/state/state_store.go | 22 ++++++++++++++++++++++
 nomad/structs/csi.go       |  4 ++--
 3 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go
index ce28f5eee22b..ff91f0da6d8f 100644
--- a/nomad/csi_endpoint_test.go
+++ b/nomad/csi_endpoint_test.go
@@ -561,6 +561,10 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) {
 
 			if tc.expectedErrMsg == "" {
 				require.NoError(t, err)
+				vol, err = state.CSIVolumeByID(nil, ns, volID)
+				require.NoError(t, err)
+				require.NotNil(t, vol)
+				require.Len(t, vol.ReadAllocs, 0)
 			} else {
 				require.Error(t, err)
 				require.True(t, strings.Contains(err.Error(), tc.expectedErrMsg),
diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go
index 87c453ba8554..977eec5bfbc2 100644
--- a/nomad/state/state_store.go
+++ b/nomad/state/state_store.go
@@ -2072,6 +2072,17 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum
 			v.ModifyIndex = index
 		}
 
+		// Allocations are copy on write, so we want to keep the Allocation ID
+		// but we need to clear the pointer so that we don't store it when we
+		// write the volume to the state store. We'll get it from the db in
+		// denormalize.
+		for allocID := range v.ReadAllocs {
+			v.ReadAllocs[allocID] = nil
+		}
+		for allocID := range v.WriteAllocs {
+			v.WriteAllocs[allocID] = nil
+		}
+
 		err = txn.Insert("csi_volumes", v)
 		if err != nil {
 			return fmt.Errorf("volume insert: %v", err)
@@ -2263,6 +2274,17 @@ func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, claim *s
 
 	volume.ModifyIndex = index
 
+	// Allocations are copy on write, so we want to keep the Allocation ID
+	// but we need to clear the pointer so that we don't store it when we
+	// write the volume to the state store. We'll get it from the db in
+	// denormalize.
+	for allocID := range volume.ReadAllocs {
+		volume.ReadAllocs[allocID] = nil
+	}
+	for allocID := range volume.WriteAllocs {
+		volume.WriteAllocs[allocID] = nil
+	}
+
 	if err = txn.Insert("csi_volumes", volume); err != nil {
 		return fmt.Errorf("volume update failed: %s: %v", id, err)
 	}
diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go
index c0f9dc4df3d3..ade9181fbeb6 100644
--- a/nomad/structs/csi.go
+++ b/nomad/structs/csi.go
@@ -378,7 +378,7 @@ func (v *CSIVolume) WriteSchedulable() bool {
 func (v *CSIVolume) WriteFreeClaims() bool {
 	switch v.AccessMode {
 	case CSIVolumeAccessModeSingleNodeWriter, CSIVolumeAccessModeMultiNodeSingleWriter:
-		return len(v.WriteAllocs) == 0
+		return len(v.WriteClaims) == 0
 	case CSIVolumeAccessModeMultiNodeMultiWriter:
 		// the CSI spec doesn't allow for setting a max number of writers.
 		// we track node resource exhaustion through v.ResourceExhausted
@@ -496,7 +496,7 @@ func (v *CSIVolume) ClaimWrite(claim *CSIVolumeClaim, alloc *Allocation) error {
 	if !v.WriteFreeClaims() {
 		// Check the blocking allocations to see if they belong to this job
 		for _, a := range v.WriteAllocs {
-			if a.Namespace != alloc.Namespace || a.JobID != alloc.JobID {
+			if a != nil && (a.Namespace != alloc.Namespace || a.JobID != alloc.JobID) {
 				return fmt.Errorf("volume max claim reached")
 			}
 		}
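
Both state_store.go hunks apply the same pattern: keep each allocation ID as a map key, nil out the *Allocation value before inserting the volume, and let the read path re-resolve the pointer from the database. Below is a minimal, self-contained sketch of that round trip. It uses simplified stand-in types (Volume, Allocation, plain maps) rather than Nomad's real CSIVolume and memdb transactions, so treat it as an illustration of the technique, not Nomad's implementation.

package main

import "fmt"

// Simplified, hypothetical stand-ins for Nomad's structs.Allocation and
// structs.CSIVolume, trimmed down to what the pattern needs.
type Allocation struct {
	ID        string
	Namespace string
	JobID     string
}

type Volume struct {
	ID         string
	ReadAllocs map[string]*Allocation
}

// writeVolume mimics the insert path in this patch: clear each alloc pointer
// so the stored copy never aliases a live (copy-on-write) Allocation, but
// keep the alloc IDs as map keys.
func writeVolume(store map[string]*Volume, v *Volume) {
	for allocID := range v.ReadAllocs {
		v.ReadAllocs[allocID] = nil
	}
	store[v.ID] = v
}

// readVolume mimics denormalization: re-resolve each alloc ID against the
// current allocation table so readers see up-to-date allocations. A real
// read path would typically denormalize onto a copy of the volume; this
// sketch mutates in place only to stay short.
func readVolume(store map[string]*Volume, allocs map[string]*Allocation, id string) *Volume {
	v := store[id]
	for allocID := range v.ReadAllocs {
		v.ReadAllocs[allocID] = allocs[allocID]
	}
	return v
}

func main() {
	allocs := map[string]*Allocation{
		"alloc1": {ID: "alloc1", Namespace: "default", JobID: "job1"},
	}
	store := map[string]*Volume{}

	vol := &Volume{
		ID:         "vol1",
		ReadAllocs: map[string]*Allocation{"alloc1": allocs["alloc1"]},
	}
	writeVolume(store, vol)
	fmt.Println(store["vol1"].ReadAllocs["alloc1"] == nil) // true: pointer cleared, ID kept

	got := readVolume(store, allocs, "vol1")
	fmt.Println(got.ReadAllocs["alloc1"].JobID) // job1: restored on read
}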
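
The csi.go hunks are the consumer-side half of the change: once stored volumes can carry nil alloc pointers, code reading ReadAllocs and WriteAllocs can no longer assume the pointers are populated. That is presumably why WriteFreeClaims now counts WriteClaims, a claims map this patch does not clear, rather than WriteAllocs, and why ClaimWrite dereferences an entry only after an explicit nil check. A tiny sketch of the underlying Go behavior follows, again with a hypothetical trimmed-down type rather than Nomad's.

package main

import "fmt"

// Allocation is a trimmed stand-in for Nomad's structs.Allocation.
type Allocation struct{ JobID string }

func main() {
	// After this patch, a volume read back from the state store keeps its
	// alloc IDs as keys but may hold nil pointers as values.
	writeAllocs := map[string]*Allocation{"alloc1": nil}

	// len counts keys regardless of nil values, so a map of cleared
	// pointers is a poor "are there writers?" signal on its own.
	fmt.Println(len(writeAllocs)) // 1

	// Ranging yields nil values; dereferencing without a guard would panic
	// with a nil pointer dereference, hence the `a != nil` check.
	for _, a := range writeAllocs {
		if a != nil {
			fmt.Println(a.JobID)
		}
	}
}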