Fixes bug that prevented group shutdown_delay updates
Group shutdown delay updates were not properly handled in the Update hook.
This commit also ensures that the shutdown_delay change is displayed in plan output.
drewbailey committed Apr 3, 2020
1 parent 222886e commit 39bfe4b
Showing 6 changed files with 54 additions and 17 deletions.
2 changes: 1 addition & 1 deletion client/allocrunner/alloc_runner_test.go
@@ -180,7 +180,7 @@ func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) {

// Set a shutdown delay
shutdownDelay := 1 * time.Second
alloc.Job.TaskGroups[0].ShutdownDelay = &shutdownDelay
alloc.Job.TaskGroups[0].ShutdownDelay = shutdownDelay

conf, cleanup := testAllocRunnerConfig(t, alloc)
defer cleanup()
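
The one-character change above follows from the new field type: ShutdownDelay is now a plain time.Duration rather than *time.Duration, so the test assigns the value instead of its address. A minimal standalone sketch of the two shapes, using hypothetical stand-in types rather than Nomad's TaskGroup:

package main

import (
	"fmt"
	"time"
)

// Stand-ins for the field before and after this commit.
type groupWithPointerDelay struct{ ShutdownDelay *time.Duration }
type groupWithValueDelay struct{ ShutdownDelay time.Duration }

func main() {
	shutdownDelay := 1 * time.Second

	// Old shape: the field is a pointer, so callers took the address.
	before := groupWithPointerDelay{ShutdownDelay: &shutdownDelay}

	// New shape: the field holds the duration directly.
	after := groupWithValueDelay{ShutdownDelay: shutdownDelay}

	fmt.Println(*before.ShutdownDelay, after.ShutdownDelay) // 1s 1s
}
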
8 changes: 2 additions & 6 deletions client/allocrunner/groupservice_hook.go
@@ -46,20 +46,15 @@ type groupServiceHookConfig struct {
}

func newGroupServiceHook(cfg groupServiceHookConfig) *groupServiceHook {
var shutdownDelay time.Duration
tg := cfg.alloc.Job.LookupTaskGroup(cfg.alloc.TaskGroup)

if tg.ShutdownDelay != nil {
shutdownDelay = *tg.ShutdownDelay
}

h := &groupServiceHook{
allocID: cfg.alloc.ID,
group: cfg.alloc.TaskGroup,
restarter: cfg.restarter,
consulClient: cfg.consul,
taskEnvBuilder: cfg.taskEnvBuilder,
delay: shutdownDelay,
delay: tg.ShutdownDelay,
}
h.logger = cfg.logger.Named(h.Name())
h.services = cfg.alloc.Job.LookupTaskGroup(h.group).Services
@@ -116,6 +111,7 @@ func (h *groupServiceHook) Update(req *interfaces.RunnerUpdateRequest) error {
h.services = req.Alloc.Job.LookupTaskGroup(h.group).Services
h.canary = canary
h.taskEnvBuilder.UpdateTask(req.Alloc, nil)
h.delay = req.Alloc.Job.LookupTaskGroup(h.group).ShutdownDelay

// Create new task services struct with those new values
newWorkloadServices := h.getWorkloadServices()
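
This file is the actual fix: the hook used to capture the group's shutdown delay only in newGroupServiceHook, so an in-place job update never reached h.delay; Update now re-reads it from the updated allocation. A minimal sketch of that pattern, assuming simplified local types (groupHook, allocation, and group are illustrative names, not Nomad's):

package main

import (
	"fmt"
	"time"
)

// Illustrative stand-ins for an allocation and its task group.
type group struct{ ShutdownDelay time.Duration }
type allocation struct{ Groups map[string]*group }

func (a *allocation) lookupGroup(name string) *group { return a.Groups[name] }

// groupHook mirrors the shape of the group service hook: the delay is cached
// on the struct when the hook is built.
type groupHook struct {
	groupName string
	delay     time.Duration
}

func newGroupHook(alloc *allocation, name string) *groupHook {
	return &groupHook{
		groupName: name,
		delay:     alloc.lookupGroup(name).ShutdownDelay, // captured at construction
	}
}

// Update refreshes the cached delay from the updated allocation, which is the
// step this commit adds to the real hook.
func (h *groupHook) Update(alloc *allocation) {
	h.delay = alloc.lookupGroup(h.groupName).ShutdownDelay
}

func main() {
	alloc := &allocation{Groups: map[string]*group{"web": {ShutdownDelay: 10 * time.Second}}}
	h := newGroupHook(alloc, "web")

	alloc.Groups["web"].ShutdownDelay = 15 * time.Second
	h.Update(alloc)

	fmt.Println(h.delay) // 15s
}
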
28 changes: 28 additions & 0 deletions client/allocrunner/groupservice_hook_test.go
@@ -56,6 +56,34 @@ func TestGroupServiceHook_NoGroupServices(t *testing.T) {
require.Equal(t, "remove", ops[2].Op)
}

// TestGroupServiceHook_ShutdownDelayUpdate asserts that calling the group
// service hook's Update method updates the hook's delay value.
func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) {
t.Parallel()

alloc := mock.Alloc()
alloc.Job.TaskGroups[0].ShutdownDelay = 10 * time.Second

logger := testlog.HCLogger(t)
consulClient := consul.NewMockConsulServiceClient(t, logger)

h := newGroupServiceHook(groupServiceHookConfig{
alloc: alloc,
consul: consulClient,
restarter: agentconsul.NoopRestarter(),
taskEnvBuilder: taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region),
logger: logger,
})
require.NoError(t, h.Prerun())

alloc.Job.TaskGroups[0].ShutdownDelay = 15 * time.Second
req := &interfaces.RunnerUpdateRequest{Alloc: alloc}
require.NoError(t, h.Update(req))

// Assert that update updated the delay value
require.Equal(t, h.delay, 15*time.Second)
}

// TestGroupServiceHook_GroupServices asserts group service hooks with group
// services does not error.
func TestGroupServiceHook_GroupServices(t *testing.T) {
5 changes: 1 addition & 4 deletions command/agent/job_endpoint.go
@@ -777,6 +777,7 @@ func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.Ta
tg.Name = *taskGroup.Name
tg.Count = *taskGroup.Count
tg.Meta = taskGroup.Meta
tg.ShutdownDelay = *taskGroup.ShutdownDelay
tg.Constraints = ApiConstraintsToStructs(taskGroup.Constraints)
tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities)
tg.Networks = ApiNetworkResourceToStructs(taskGroup.Networks)
@@ -789,10 +790,6 @@ func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.Ta
Mode: *taskGroup.RestartPolicy.Mode,
}

if taskGroup.ShutdownDelay != nil {
tg.ShutdownDelay = taskGroup.ShutdownDelay
}

if taskGroup.ReschedulePolicy != nil {
tg.ReschedulePolicy = &structs.ReschedulePolicy{
Attempts: *taskGroup.ReschedulePolicy.Attempts,
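
At the API boundary the field remains a pointer (so an unset value can be detected and defaulted), while the internal struct now stores a plain duration, which is why the conversion above dereferences directly instead of nil-checking. A hedged sketch of that mapping with simplified stand-in types; the unconditional dereference assumes the API field has already been defaulted upstream:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins: the API type keeps *time.Duration so "unset" is
// representable; the internal type stores the value directly.
type apiTaskGroup struct{ ShutdownDelay *time.Duration }
type structsTaskGroup struct{ ShutdownDelay time.Duration }

// convert mirrors the shape of the conversion above; it assumes the pointer
// is non-nil, as the real code now does after this commit.
func convert(in *apiTaskGroup) structsTaskGroup {
	return structsTaskGroup{ShutdownDelay: *in.ShutdownDelay}
}

func main() {
	d := 20 * time.Second
	fmt.Println(convert(&apiTaskGroup{ShutdownDelay: &d}).ShutdownDelay) // 20s
}
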
20 changes: 20 additions & 0 deletions nomad/structs/diff_test.go
@@ -3029,6 +3029,26 @@ func TestTaskGroupDiff(t *testing.T) {
},
},
},
{
TestCase: "TaskGroup shutdown_delay edited",
Old: &TaskGroup{
ShutdownDelay: 30 * time.Second,
},
New: &TaskGroup{
ShutdownDelay: 5 * time.Second,
},
Expected: &TaskGroupDiff{
Type: DiffTypeEdited,
Fields: []*FieldDiff{
{
Type: DiffTypeEdited,
Name: "ShutdownDelay",
Old: "30000000000",
New: "5000000000",
},
},
},
},
}

for i, c := range cases {
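
The Old and New strings in the added case are the two durations rendered as raw nanosecond counts: time.Duration is an int64 number of nanoseconds, and the field diff apparently prints that underlying integer rather than Duration's human-readable form. A quick standalone check of the expected values:

package main

import (
	"fmt"
	"time"
)

func main() {
	oldDelay := 30 * time.Second
	newDelay := 5 * time.Second

	// Formatting the underlying integer yields the strings the test expects.
	fmt.Println(int64(oldDelay), int64(newDelay)) // 30000000000 5000000000

	// Duration's String method would print the human-readable form instead.
	fmt.Println(oldDelay, newDelay) // 30s 5s
}
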
8 changes: 2 additions & 6 deletions nomad/structs/structs.go
@@ -3790,7 +3790,7 @@ func (j *Job) Validate() error {
taskGroups[tg.Name] = idx
}

if tg.ShutdownDelay != nil && *tg.ShutdownDelay < 0 {
if tg.ShutdownDelay < 0 {
mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value"))
}

@@ -5202,7 +5202,7 @@ type TaskGroup struct {

// ShutdownDelay is the amount of time to wait between deregistering
// group services in consul and stopping tasks.
ShutdownDelay *time.Duration
ShutdownDelay time.Duration
}

func (tg *TaskGroup) Copy() *TaskGroup {
@@ -5250,10 +5250,6 @@ func (tg *TaskGroup) Copy() *TaskGroup {
}
}

if tg.ShutdownDelay != nil {
ntg.ShutdownDelay = tg.ShutdownDelay
}

return ntg
}

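
Switching to a value type also simplifies Copy and Validate, as the diff above shows: a value-typed duration is copied by plain struct assignment and can be compared against zero directly, with no nil guards. A small sketch of that property using a simplified stand-in type rather than Nomad's TaskGroup:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Simplified stand-in for a task group with a value-typed shutdown delay.
type taskGroup struct {
	Name          string
	ShutdownDelay time.Duration
}

// copyGroup shows that the value field needs no pointer-specific handling:
// plain struct assignment already copies the duration.
func copyGroup(tg *taskGroup) *taskGroup {
	if tg == nil {
		return nil
	}
	ntg := *tg
	return &ntg
}

// validate mirrors the simplified check in Job.Validate above: the field is
// compared directly, with no nil guard.
func validate(tg *taskGroup) error {
	if tg.ShutdownDelay < 0 {
		return errors.New("ShutdownDelay must be a positive value")
	}
	return nil
}

func main() {
	tg := &taskGroup{Name: "web", ShutdownDelay: 10 * time.Second}
	cp := copyGroup(tg)
	cp.ShutdownDelay = 20 * time.Second

	fmt.Println(tg.ShutdownDelay, cp.ShutdownDelay) // 10s 20s
	fmt.Println(validate(&taskGroup{ShutdownDelay: -time.Second}))
}
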
