periodic GC for CSI plugins #7878

Merged: 6 commits, May 6, 2020
7 changes: 7 additions & 0 deletions command/agent/agent.go
@@ -314,6 +314,13 @@ func convertServerConfig(agentConfig *Config) (*nomad.Config, error) {
        }
        conf.DeploymentGCThreshold = dur
    }
    if gcThreshold := agentConfig.Server.CSIPluginGCThreshold; gcThreshold != "" {
        dur, err := time.ParseDuration(gcThreshold)
        if err != nil {
            return nil, err
        }
        conf.CSIPluginGCThreshold = dur
    }

    if heartbeatGrace := agentConfig.Server.HeartbeatGrace; heartbeatGrace != 0 {
        conf.HeartbeatGrace = heartbeatGrace
8 changes: 8 additions & 0 deletions command/agent/config.go
@@ -389,6 +389,11 @@ type ServerConfig struct {
    // GCed but the threshold can be used to filter by age.
    DeploymentGCThreshold string `hcl:"deployment_gc_threshold"`

    // CSIPluginGCThreshold controls how "old" a CSI plugin must be to be
    // collected by GC. Age is not the only requirement for a plugin to be
    // GCed but the threshold can be used to filter by age.
    CSIPluginGCThreshold string `hcl:"csi_plugin_gc_threshold"`

    // HeartbeatGrace is the grace period beyond the TTL to account for network,
    // processing delays and clock skew before marking a node as "down".
    HeartbeatGrace time.Duration
@@ -1323,6 +1328,9 @@ func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
    if b.DeploymentGCThreshold != "" {
        result.DeploymentGCThreshold = b.DeploymentGCThreshold
    }
    if b.CSIPluginGCThreshold != "" {
        result.CSIPluginGCThreshold = b.CSIPluginGCThreshold
    }
    if b.HeartbeatGrace != 0 {
        result.HeartbeatGrace = b.HeartbeatGrace
    }
1 change: 1 addition & 0 deletions command/agent/config_parse_test.go
@@ -104,6 +104,7 @@ var basicConfig = &Config{
        JobGCInterval:         "3m",
        JobGCThreshold:        "12h",
        DeploymentGCThreshold: "12h",
        CSIPluginGCThreshold:  "12h",
        HeartbeatGrace:        30 * time.Second,
        HeartbeatGraceHCL:     "30s",
        MinHeartbeatTTL:       33 * time.Second,
1 change: 1 addition & 0 deletions command/agent/testdata/basic.hcl
@@ -115,6 +115,7 @@ server {
    job_gc_threshold          = "12h"
    eval_gc_threshold         = "12h"
    deployment_gc_threshold   = "12h"
    csi_plugin_gc_threshold   = "12h"
    heartbeat_grace           = "30s"
    min_heartbeat_ttl         = "33s"
    max_heartbeats_per_second = 11.0
1 change: 1 addition & 0 deletions command/agent/testdata/basic.json
@@ -256,6 +256,7 @@
    {
        "authoritative_region": "foobar",
        "bootstrap_expect": 5,
        "csi_plugin_gc_threshold": "12h",
        "data_dir": "/tmp/data",
        "deployment_gc_threshold": "12h",
        "enabled": true,
9 changes: 9 additions & 0 deletions nomad/config.go
@@ -191,6 +191,13 @@ type Config struct {
    // for GC. This gives users some time to view terminal deployments.
    DeploymentGCThreshold time.Duration

    // CSIPluginGCInterval is how often we dispatch a job to GC unused plugins.
    CSIPluginGCInterval time.Duration

    // CSIPluginGCThreshold is how "old" a plugin must be to be eligible
    // for GC. This gives users some time to debug plugins.
    CSIPluginGCThreshold time.Duration

    // EvalNackTimeout controls how long we allow a sub-scheduler to
    // work on an evaluation before we consider it failed and Nack it.
    // This allows that evaluation to be handed to another sub-scheduler
@@ -377,6 +384,8 @@ func DefaultConfig() *Config {
        NodeGCThreshold:               24 * time.Hour,
        DeploymentGCInterval:          5 * time.Minute,
        DeploymentGCThreshold:         1 * time.Hour,
        CSIPluginGCInterval:           5 * time.Minute,
        CSIPluginGCThreshold:          1 * time.Hour,
        EvalNackTimeout:               60 * time.Second,
        EvalDeliveryLimit:             3,
        EvalNackInitialReenqueueDelay: 1 * time.Second,
53 changes: 53 additions & 0 deletions nomad/core_sched.go
@@ -54,6 +54,8 @@ func (c *CoreScheduler) Process(eval *structs.Evaluation) error {
        return c.deploymentGC(eval)
    case structs.CoreJobCSIVolumeClaimGC:
        return c.csiVolumeClaimGC(eval)
    case structs.CoreJobCSIPluginGC:
        return c.csiPluginGC(eval)
    case structs.CoreJobForceGC:
        return c.forceGC(eval)
    default:
@@ -72,6 +74,9 @@ func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error {
    if err := c.deploymentGC(eval); err != nil {
        return err
    }
    if err := c.csiPluginGC(eval); err != nil {
        return err
    }

    // Node GC must occur after the others to ensure the allocations are
    // cleared.
@@ -736,3 +741,51 @@ func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error {
    err := c.srv.RPC("CSIVolume.Claim", req, &structs.CSIVolumeClaimResponse{})
    return err
}

// csiPluginGC is used to garbage collect unused plugins
func (c *CoreScheduler) csiPluginGC(eval *structs.Evaluation) error {

    ws := memdb.NewWatchSet()

    iter, err := c.snap.CSIPlugins(ws)
    if err != nil {
        return err
    }

    // Get the time table to calculate GC cutoffs.
    var oldThreshold uint64
    if eval.JobID == structs.CoreJobForceGC {
        // The GC was forced, so set the threshold to its maximum so
        // everything will GC.
        oldThreshold = math.MaxUint64
        c.logger.Debug("forced plugin GC")
    } else {
        tt := c.srv.fsm.TimeTable()
        cutoff := time.Now().UTC().Add(-1 * c.srv.config.CSIPluginGCThreshold)
        oldThreshold = tt.NearestIndex(cutoff)
    }

    c.logger.Debug("CSI plugin GC scanning before cutoff index",
        "index", oldThreshold, "csi_plugin_gc_threshold", c.srv.config.CSIPluginGCThreshold)

    for i := iter.Next(); i != nil; i = iter.Next() {
        plugin := i.(*structs.CSIPlugin)

        // Ignore new plugins
        if plugin.CreateIndex > oldThreshold {
            continue
        }

        req := &structs.CSIPluginDeleteRequest{ID: plugin.ID}
        req.Region = c.srv.Region()
        err := c.srv.RPC("CSIPlugin.Delete", req, &structs.CSIPluginDeleteResponse{})
        if err != nil {
            if err.Error() == "plugin in use" {
Contributor: It's a shame to do this with string matching, but I can't think of anything else reasonably easy.

Member (Author): Now that we're on golang 1.14, it'd be worth making sure all our error strings have proper error types so we can test them with errors.Is(). But for now this will do I guess. 😁
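For illustration, that errors.Is() approach might look like the sketch below. ErrPluginInUse is a hypothetical name (this PR still returns a bare string error), and the check assumes the error value reaches the caller intact, which holds for in-process handling but not necessarily once an error has been flattened to a string across RPC serialization.

    // Sketch: in the structs package, a sentinel error instead of a bare string.
    import "errors"

    var ErrPluginInUse = errors.New("plugin in use")

    // At the csiPluginGC call site, string matching becomes a type-safe check:
    err := c.srv.RPC("CSIPlugin.Delete", req, &structs.CSIPluginDeleteResponse{})
    if err != nil {
        if errors.Is(err, structs.ErrPluginInUse) {
            continue // an in-use plugin is expected here, not a failure
        }
        return err
    }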

                continue
            }
            c.logger.Error("failed to GC plugin", "plugin_id", plugin.ID, "error", err)
            return err
        }
    }
    return nil
}
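Note that none of the hunks shown here consume CSIPluginGCInterval; that wiring lives in the leader's periodic dispatch loop (leader.go), which this extract omits. Below is a hedged sketch of that dispatch, modeled on how Nomad drives its other GC core jobs; the surrounding loop, getLatest helper, and stopCh are assumptions, not lines from this diff.

    // Sketch only: inside the leader's periodic scheduling loop.
    csiPluginGC := time.NewTicker(s.config.CSIPluginGCInterval)
    defer csiPluginGC.Stop()

    for {
        select {
        case <-csiPluginGC.C:
            // Enqueue a core-job eval; CoreScheduler.Process routes it to csiPluginGC.
            if index, ok := getLatest(); ok {
                s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobCSIPluginGC, index))
            }
        case <-stopCh:
            return
        }
    }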
55 changes: 55 additions & 0 deletions nomad/core_sched_test.go
@@ -8,6 +8,7 @@ import (
    memdb "github.com/hashicorp/go-memdb"
    "github.com/hashicorp/nomad/helper/uuid"
    "github.com/hashicorp/nomad/nomad/mock"
    "github.com/hashicorp/nomad/nomad/state"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/nomad/testutil"
    "github.com/stretchr/testify/assert"
@@ -2193,3 +2194,57 @@ func TestAllocation_GCEligible(t *testing.T) {
    alloc.ClientStatus = structs.AllocClientStatusComplete
    require.True(allocGCEligible(alloc, nil, time.Now(), 1000))
}

func TestCoreScheduler_CSIPluginGC(t *testing.T) {
    t.Parallel()

    srv, cleanupSRV := TestServer(t, nil)
    defer cleanupSRV()
    testutil.WaitForLeader(t, srv.RPC)
    require := require.New(t)

    srv.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

    deleteNodes := state.CreateTestCSIPlugin(srv.fsm.State(), "foo")
    defer deleteNodes()
    state := srv.fsm.State()

    // Update the time tables to make this work
    tt := srv.fsm.TimeTable()
    index := uint64(2000)
    tt.Witness(index, time.Now().UTC().Add(-1*srv.config.DeploymentGCThreshold))

    // Create a core scheduler
    snap, err := state.Snapshot()
    require.NoError(err)
    core := NewCoreScheduler(srv, snap)

    // Attempt the GC
    index++
    gc := srv.coreJobEval(structs.CoreJobCSIPluginGC, index)
    require.NoError(core.Process(gc))

    // Should not be gone (plugin in use)
    ws := memdb.NewWatchSet()
    plug, err := state.CSIPluginByID(ws, "foo")
    require.NotNil(plug)
    require.NoError(err)

    // Empty the plugin
    plug.Controllers = map[string]*structs.CSIInfo{}
    plug.Nodes = map[string]*structs.CSIInfo{}

    index++
    err = state.UpsertCSIPlugin(index, plug)
    require.NoError(err)

    // Retry
    index++
    gc = srv.coreJobEval(structs.CoreJobCSIPluginGC, index)
    require.NoError(core.Process(gc))

    // Should be gone
    plug, err = state.CSIPluginByID(ws, "foo")
    require.Nil(plug)
    require.NoError(err)
}
31 changes: 31 additions & 0 deletions nomad/csi_endpoint.go
@@ -594,3 +594,34 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPluginGetResponse) error {
    }}
    return v.srv.blockingRPC(&opts)
}

// Delete deletes a plugin if it is unused
func (v *CSIPlugin) Delete(args *structs.CSIPluginDeleteRequest, reply *structs.CSIPluginDeleteResponse) error {
    if done, err := v.srv.forward("CSIPlugin.Delete", args, args, reply); done {
        return err
    }

    // Check that it is a management token.
    if aclObj, err := v.srv.ResolveToken(args.AuthToken); err != nil {
        return err
    } else if aclObj != nil && !aclObj.IsManagement() {
        return structs.ErrPermissionDenied
    }

    metricsStart := time.Now()
    defer metrics.MeasureSince([]string{"nomad", "plugin", "delete"}, metricsStart)

    resp, index, err := v.srv.raftApply(structs.CSIPluginDeleteRequestType, args)
    if err != nil {
        v.logger.Error("csi raft apply failed", "error", err, "method", "delete")
        return err
    }

    if respErr, ok := resp.(error); ok {
        return respErr
    }

    reply.Index = index
    v.srv.setQueryMeta(&reply.QueryMeta)
    return nil
}
81 changes: 81 additions & 0 deletions nomad/csi_endpoint_test.go
@@ -548,6 +548,87 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) {
    require.Nil(t, resp2.Plugin)
}

func TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) {
    t.Parallel()
    srv, shutdown := TestServer(t, func(c *Config) {
        c.NumSchedulers = 0 // Prevent automatic dequeue
    })
    defer shutdown()
    testutil.WaitForLeader(t, srv.RPC)

    deleteNodes := state.CreateTestCSIPlugin(srv.fsm.State(), "foo")
    defer deleteNodes()

    state := srv.fsm.State()
    state.BootstrapACLTokens(1, 0, mock.ACLManagementToken())
    srv.config.ACLEnabled = true
    codec := rpcClient(t, srv)

    // Get the plugin back out
    listJob := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob})
    policy := mock.PluginPolicy("read") + listJob
    getToken := mock.CreatePolicyAndToken(t, state, 1001, "plugin-read", policy)

    reqGet := &structs.CSIPluginGetRequest{
        ID: "foo",
        QueryOptions: structs.QueryOptions{
            Region:    "global",
            AuthToken: getToken.SecretID,
        },
    }
    respGet := &structs.CSIPluginGetResponse{}
    err := msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", reqGet, respGet)
    require.NoError(t, err)
    require.NotNil(t, respGet.Plugin)

    // Delete plugin
    reqDel := &structs.CSIPluginDeleteRequest{
        ID: "foo",
        QueryOptions: structs.QueryOptions{
            Region:    "global",
            AuthToken: getToken.SecretID,
        },
    }
    respDel := &structs.CSIPluginDeleteResponse{}

    // Improper permissions
    err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Delete", reqDel, respDel)
    require.EqualError(t, err, structs.ErrPermissionDenied.Error())

    // Retry with management permissions
    reqDel.AuthToken = srv.getLeaderAcl()
    err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Delete", reqDel, respDel)
    require.EqualError(t, err, "plugin in use")

    // Plugin was not deleted
    err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", reqGet, respGet)
    require.NoError(t, err)
    require.NotNil(t, respGet.Plugin)

    // Empty the plugin
    plugin := respGet.Plugin.Copy()
    plugin.Controllers = map[string]*structs.CSIInfo{}
    plugin.Nodes = map[string]*structs.CSIInfo{}

    index, _ := state.LatestIndex()
    index++
    err = state.UpsertCSIPlugin(index, plugin)
    require.NoError(t, err)

    // Retry now that it's empty
    err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Delete", reqDel, respDel)
    require.NoError(t, err)

    // Plugin is deleted
    err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", reqGet, respGet)
    require.NoError(t, err)
    require.Nil(t, respGet.Plugin)

    // Safe to call on already-deleted plugins
    err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Delete", reqDel, respDel)
    require.NoError(t, err)
}

func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) {
    srv, shutdown := TestServer(t, func(c *Config) {})
    defer shutdown()
24 changes: 22 additions & 2 deletions nomad/fsm.go
@@ -270,10 +270,12 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} {
        return n.applyCSIVolumeDeregister(buf[1:], log.Index)
    case structs.CSIVolumeClaimRequestType:
        return n.applyCSIVolumeClaim(buf[1:], log.Index)
    case structs.ScalingEventRegisterRequestType:
        return n.applyUpsertScalingEvent(buf[1:], log.Index)
    case structs.CSIVolumeClaimBatchRequestType:
        return n.applyCSIVolumeBatchClaim(buf[1:], log.Index)
    case structs.CSIPluginDeleteRequestType:
        return n.applyCSIPluginDelete(buf[1:], log.Index)
    }

    // Check enterprise only message types.
@@ -1190,6 +1192,24 @@ func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} {
    return nil
}

func (n *nomadFSM) applyCSIPluginDelete(buf []byte, index uint64) interface{} {
    var req structs.CSIPluginDeleteRequest
    if err := structs.Decode(buf, &req); err != nil {
        panic(fmt.Errorf("failed to decode request: %v", err))
    }
    defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_plugin_delete"}, time.Now())

    if err := n.state.DeleteCSIPlugin(index, req.ID); err != nil {
        // "plugin in use" is an error for the state store but not for typical
        // callers, so reduce log noise by not logging that case here
        if err.Error() != "plugin in use" {
            n.logger.Error("DeleteCSIPlugin failed", "error", err)
        }
        return err
    }
    return nil
}

func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
    // Create a new snapshot
    snap, err := n.state.Snapshot()