Merge pull request #3070 from hashicorp/b-rolling
Placing allocs counts towards placement limit
dadgar committed Aug 21, 2017
2 parents b985242 + b6ed801 commit 2cc3962
Showing 3 changed files with 58 additions and 10 deletions.
3 changes: 1 addition & 2 deletions scheduler/generic_sched_test.go
@@ -1596,7 +1596,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
 	job2 := job.Copy()
 	job2.TaskGroups[0].Count = 5
 	job2.TaskGroups[0].Update = &structs.UpdateStrategy{
-		MaxParallel:     1,
+		MaxParallel:     5,
 		HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
 		MinHealthyTime:  10 * time.Second,
 		HealthyDeadline: 10 * time.Minute,
@@ -1607,7 +1607,6 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
 	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
 	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
 
-	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
 		ID:       structs.GenerateUUID(),
 		Priority: 50,
3 changes: 3 additions & 0 deletions scheduler/reconcile.go
@@ -374,6 +374,9 @@ func (a *allocReconciler) computeGroup(group string, all allocSet) bool {
 		for _, p := range place {
 			a.result.place = append(a.result.place, p)
 		}
+
+		min := helper.IntMin(len(place), limit)
+		limit -= min
 	} else if !deploymentPlaceReady && len(lost) != 0 {
 		// We are in a situation where we shouldn't be placing more than we need
 		// to but we have lost allocations. It is a very weird user experience
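The reconcile.go hunk above is the substance of the fix: allocations placed in the current pass now consume part of the rolling-update limit, so destructive updates computed later in computeGroup cannot push a task group past MaxParallel in a single evaluation. Below is a minimal, standalone sketch of that accounting; remainingUpdateLimit and intMin are hypothetical names for illustration only (the real code uses helper.IntMin inside the reconciler), not Nomad's API.

// Minimal, standalone sketch (not Nomad's reconciler) of the accounting the
// patch introduces: allocations placed in this pass consume the rolling
// update limit before destructive updates do.
package main

import "fmt"

// intMin stands in for Nomad's helper.IntMin.
func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// remainingUpdateLimit is a hypothetical helper for illustration only: given
// a MaxParallel-style limit and the number of allocations just placed, it
// returns how many destructive updates may still run in the same pass.
func remainingUpdateLimit(limit, placed int) int {
	limit -= intMin(placed, limit) // placements count toward the limit
	return limit
}

func main() {
	// MaxParallel = 5 with 3 placements leaves room for 2 destructive updates.
	fmt.Println(remainingUpdateLimit(5, 3)) // prints 2

	// If placements alone exhaust the limit, no destructive updates run.
	fmt.Println(remainingUpdateLimit(2, 4)) // prints 0
}

This mirrors the expectation changes in the tests below, where placements made in the same pass now reduce the number of destructive updates the reconciler is allowed to schedule.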
62 changes: 54 additions & 8 deletions scheduler/reconcile_test.go
@@ -69,6 +69,7 @@ Update stanza Tests:
 √  Finished deployment gets marked as complete
 √  The stagger is correctly calculated when it is applied across multiple task groups.
 √  Change job change while scaling up
+√  Update the job when all allocations from the previous job haven't been placed yet.
 */
 
 var (
@@ -2468,9 +2469,9 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
 		PlacedAllocs:    7,
 	}
 
-	// Create 3 allocations from the old job
+	// Create 2 allocations from the old job
 	var allocs []*structs.Allocation
-	for i := 7; i < 10; i++ {
+	for i := 8; i < 10; i++ {
 		alloc := mock.Alloc()
 		alloc.Job = job
 		alloc.JobID = job.ID
@@ -2482,7 +2483,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
 
 	// Create the healthy replacements
 	handled := make(map[string]allocUpdateType)
-	for i := 0; i < 7; i++ {
+	for i := 0; i < 8; i++ {
 		new := mock.Alloc()
 		new.Job = job
 		new.JobID = job.ID
@@ -2501,7 +2502,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
 	tainted := make(map[string]*structs.Node, 3)
 	for i := 0; i < 3; i++ {
 		n := mock.Node()
-		n.ID = allocs[3+i].NodeID
+		n.ID = allocs[2+i].NodeID
 		if i == 0 {
 			n.Status = structs.NodeStatusDown
 		} else {
@@ -2519,21 +2520,21 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
 		createDeployment:  nil,
 		deploymentUpdates: nil,
 		place:             2,
-		destructive:       3,
+		destructive:       2,
 		stop:              2,
 		followupEvalWait:  31 * time.Second,
 		desiredTGUpdates: map[string]*structs.DesiredUpdates{
 			job.TaskGroups[0].Name: {
 				Place:             1, // Place the lost
 				Stop:              1, // Stop the lost
 				Migrate:           1, // Migrate the tainted
-				DestructiveUpdate: 3,
-				Ignore:            5,
+				DestructiveUpdate: 2,
+				Ignore:            6,
 			},
 		},
 	})
 
-	assertNamesHaveIndexes(t, intRange(7, 9), destructiveResultsToNames(r.destructiveUpdate))
+	assertNamesHaveIndexes(t, intRange(8, 9), destructiveResultsToNames(r.destructiveUpdate))
 	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
 	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
 }
@@ -3012,3 +3013,48 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) {
 		},
 	})
 }
+
+// Tests the reconciler doesn't stop allocations when doing a rolling upgrade
+// where the count of the old job allocs is < desired count.
+func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) {
+	job := mock.Job()
+	job.TaskGroups[0].Update = noCanaryUpdate
+
+	// Create 7 allocations from the old job
+	var allocs []*structs.Allocation
+	for i := 0; i < 7; i++ {
+		alloc := mock.Alloc()
+		alloc.Job = job
+		alloc.JobID = job.ID
+		alloc.NodeID = structs.GenerateUUID()
+		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
+		alloc.TaskGroup = job.TaskGroups[0].Name
+		allocs = append(allocs, alloc)
+	}
+
+	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
+	r := reconciler.Compute()
+
+	d := structs.NewDeployment(job)
+	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
+		DesiredTotal: 10,
+	}
+
+	// Assert the correct results
+	assertResults(t, r, &resultExpectation{
+		createDeployment:  d,
+		deploymentUpdates: nil,
+		place:             3,
+		destructive:       1,
+		desiredTGUpdates: map[string]*structs.DesiredUpdates{
+			job.TaskGroups[0].Name: {
+				Place:             3,
+				DestructiveUpdate: 1,
+				Ignore:            6,
+			},
+		},
+	})
+
+	assertNamesHaveIndexes(t, intRange(7, 9), placeResultsToNames(r.place))
+	assertNamesHaveIndexes(t, intRange(0, 0), destructiveResultsToNames(r.destructiveUpdate))
+}
