diff --git a/CHANGELOG.md b/CHANGELOG.md index 4693be800ca4..117a34bbc23f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ BUG FIXES: * core: Ignore `server` config values if `server` is disabled [[GH-6047](https://github.com/hashicorp/nomad/issues/6047)] * core: Added `semver` constraint for strict Semver 2.0 version comparisons [[GH-6699](https://github.com/hashicorp/nomad/issues/6699)] + * core: Fixed server panic caused by a plan evicting and preempting allocs on a node [[GH-6792](https://github.com/hashicorp/nomad/issues/6792)] * api: Return a 404 if endpoint not found instead of redirecting to /ui/ [[GH-6658](https://github.com/hashicorp/nomad/issues/6658)] * api: Decompress web socket response body if gzipped on error responses [[GH-6650](https://github.com/hashicorp/nomad/issues/6650)] * api: Fixed a bug where some FS/Allocation API endpoints didn't return error messages [[GH-6427](https://github.com/hashicorp/nomad/issues/6427)] diff --git a/scheduler/context.go b/scheduler/context.go index e38ae5b0f318..fa7b51648703 100644 --- a/scheduler/context.go +++ b/scheduler/context.go @@ -25,9 +25,9 @@ type Context interface { // Reset is invoked after making a placement Reset() - // ProposedAllocs returns the proposed allocations for a node - // which is the existing allocations, removing evictions, and - // adding any planned placements. + // ProposedAllocs returns the proposed allocations for a node which are + // the existing allocations, removing evictions, and adding any planned + // placements. ProposedAllocs(nodeID string) ([]*structs.Allocation, error) // RegexpCache is a cache of regular expressions @@ -120,22 +120,21 @@ func (e *EvalContext) Reset() { func (e *EvalContext) ProposedAllocs(nodeID string) ([]*structs.Allocation, error) { // Get the existing allocations that are non-terminal ws := memdb.NewWatchSet() - existingAlloc, err := e.state.AllocsByNodeTerminal(ws, nodeID, false) + proposed, err := e.state.AllocsByNodeTerminal(ws, nodeID, false) if err != nil { return nil, err } // Determine the proposed allocation by first removing allocations // that are planned evictions and adding the new allocations. 
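Note: the diff lines that follow are the substance of the fix. Before this change, when a plan both evicted and preempted allocations on the same node, each branch filtered the original existingAlloc slice, so the preemption filter clobbered the result of the eviction filter and evicted allocs were still counted as present; that inconsistent proposed set is what the CHANGELOG entry above ties to the server panic (GH-6792; see also GH-6787). A condensed before/after sketch in Go, with names taken from the hunk (the local variable preempted is a shorthand introduced here):

    // Before: both branches filter existingAlloc, so only the last
    // branch to run takes effect.
    proposed := existingAlloc
    if update := e.plan.NodeUpdate[nodeID]; len(update) > 0 {
        proposed = structs.RemoveAllocs(existingAlloc, update)
    }
    if preempted := e.plan.NodePreemptions[nodeID]; len(preempted) > 0 {
        proposed = structs.RemoveAllocs(existingAlloc, preempted) // drops the eviction removals
    }

    // After: each filter starts from the current value of proposed,
    // so evictions and preemptions compose correctly.
    if update := e.plan.NodeUpdate[nodeID]; len(update) > 0 {
        proposed = structs.RemoveAllocs(proposed, update)
    }
    if preempted := e.plan.NodePreemptions[nodeID]; len(preempted) > 0 {
        proposed = structs.RemoveAllocs(proposed, preempted)
    }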
- proposed := existingAlloc if update := e.plan.NodeUpdate[nodeID]; len(update) > 0 { - proposed = structs.RemoveAllocs(existingAlloc, update) + proposed = structs.RemoveAllocs(proposed, update) } // Remove any allocs that are being preempted nodePreemptedAllocs := e.plan.NodePreemptions[nodeID] if len(nodePreemptedAllocs) > 0 { - proposed = structs.RemoveAllocs(existingAlloc, nodePreemptedAllocs) + proposed = structs.RemoveAllocs(proposed, nodePreemptedAllocs) } // We create an index of the existing allocations so that if an inplace diff --git a/scheduler/context_test.go b/scheduler/context_test.go index e5e0be7a84cb..7ac1710214a3 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -106,9 +106,9 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { ClientStatus: structs.AllocClientStatusPending, TaskGroup: "web", } - noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) - noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) - noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))) + require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))) + require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2})) // Add a planned eviction to alloc1 plan := ctx.Plan() @@ -149,6 +149,116 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { } } +// TestEvalContext_ProposedAlloc_EvictPreempt asserts both Evicted and +// Preempted allocs are removed from the allocs proposed for a node. +// +// See https://github.com/hashicorp/nomad/issues/6787 +// +func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { + t.Parallel() + state, ctx := testContext(t) + nodes := []*RankedNode{ + { + Node: &structs.Node{ + ID: uuid.Generate(), + NodeResources: &structs.NodeResources{ + Cpu: structs.NodeCpuResources{ + CpuShares: 1024 * 3, + }, + Memory: structs.NodeMemoryResources{ + MemoryMB: 1024 * 3, + }, + }, + }, + }, + } + + // Add existing allocations + j1, j2, j3 := mock.Job(), mock.Job(), mock.Job() + allocEvict := &structs.Allocation{ + ID: uuid.Generate(), + Namespace: structs.DefaultNamespace, + EvalID: uuid.Generate(), + NodeID: nodes[0].Node.ID, + JobID: j1.ID, + Job: j1, + AllocatedResources: &structs.AllocatedResources{ + Tasks: map[string]*structs.AllocatedTaskResources{ + "web": { + Cpu: structs.AllocatedCpuResources{ + CpuShares: 1024, + }, + Memory: structs.AllocatedMemoryResources{ + MemoryMB: 1024, + }, + }, + }, + }, + DesiredStatus: structs.AllocDesiredStatusRun, + ClientStatus: structs.AllocClientStatusPending, + TaskGroup: "web", + } + allocPreempt := &structs.Allocation{ + ID: uuid.Generate(), + Namespace: structs.DefaultNamespace, + EvalID: uuid.Generate(), + NodeID: nodes[0].Node.ID, + JobID: j2.ID, + Job: j2, + AllocatedResources: &structs.AllocatedResources{ + Tasks: map[string]*structs.AllocatedTaskResources{ + "web": { + Cpu: structs.AllocatedCpuResources{ + CpuShares: 1024, + }, + Memory: structs.AllocatedMemoryResources{ + MemoryMB: 1024, + }, + }, + }, + }, + DesiredStatus: structs.AllocDesiredStatusRun, + ClientStatus: structs.AllocClientStatusPending, + TaskGroup: "web", + } + allocPropose := &structs.Allocation{ + ID: uuid.Generate(), + Namespace: structs.DefaultNamespace, + EvalID: uuid.Generate(), + NodeID: nodes[0].Node.ID, + JobID: j3.ID, + Job: j3, + AllocatedResources: &structs.AllocatedResources{ + Tasks: map[string]*structs.AllocatedTaskResources{ + "web": { + Cpu: 
structs.AllocatedCpuResources{ + CpuShares: 1024, + }, + Memory: structs.AllocatedMemoryResources{ + MemoryMB: 1024, + }, + }, + }, + }, + DesiredStatus: structs.AllocDesiredStatusRun, + ClientStatus: structs.AllocClientStatusPending, + TaskGroup: "web", + } + require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID))) + require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID))) + require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID))) + require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose})) + + // Plan to evict one alloc and preempt another + plan := ctx.Plan() + plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{allocEvict} + plan.NodePreemptions[nodes[0].Node.ID] = []*structs.Allocation{allocPreempt} + + proposed, err := ctx.ProposedAllocs(nodes[0].Node.ID) + require.NoError(t, err) + require.Len(t, proposed, 1) +} + func TestEvalEligibility_JobStatus(t *testing.T) { e := NewEvalEligibility() cc := "v1:100" diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 18ec4160670d..0e966d39d422 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -22,12 +22,12 @@ func TestServiceSched_JobRegister(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -39,7 +39,7 @@ func TestServiceSched_JobRegister(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -78,7 +78,7 @@ func TestServiceSched_JobRegister(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -112,13 +112,13 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job job := mock.Job() job.TaskGroups[0].EphemeralDisk.Sticky = true - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -129,7 +129,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation if err := h.Process(NewServiceScheduler, eval); err != nil { @@ -151,7 +151,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { // Update the job to force a rolling upgrade updated := job.Copy() updated.TaskGroups[0].Tasks[0].Resources.CPU += 10 - noErr(t, h.State.UpsertJob(h.NextIndex(), updated)) + require.NoError(t, 
h.State.UpsertJob(h.NextIndex(), updated)) // Create a mock evaluation to handle the update eval = &structs.Evaluation{ @@ -162,7 +162,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) h1 := NewHarnessWithState(t, h.State) if err := h1.Process(NewServiceScheduler, eval); err != nil { t.Fatalf("err: %v", err) @@ -203,14 +203,14 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { // Create a node node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create a job with count 2 and disk as 60GB so that only one allocation // can fit job := mock.Job() job.TaskGroups[0].Count = 2 job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -222,7 +222,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -262,7 +262,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure only one allocation was placed if len(out) != 1 { @@ -278,7 +278,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job that uses distinct host and has count 1 higher than what is @@ -286,7 +286,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 11 job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts}) - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -298,7 +298,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -335,7 +335,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -365,7 +365,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { rack = "rack1" } node.Meta["rack"] = rack - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job that uses distinct property and has count higher 
than what is @@ -378,7 +378,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { LTarget: "${meta.rack}", RTarget: "2", }) - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -390,7 +390,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -432,7 +432,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 4 { @@ -458,7 +458,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { for i := 0; i < 2; i++ { node := mock.Node() node.Meta["ssd"] = "true" - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job that uses distinct property only on one task group. @@ -473,7 +473,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { job.TaskGroups[1].Name = "tg2" job.TaskGroups[1].Count = 2 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -484,7 +484,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -520,7 +520,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 3 { @@ -578,7 +578,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation assert.Nil(h.Process(NewServiceScheduler, eval), "Process") @@ -665,7 +665,7 @@ func TestServiceSched_Spread(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation assert.Nil(h.Process(NewServiceScheduler, eval), "Process") @@ -742,7 +742,7 @@ func TestServiceSched_EvenSpread(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation assert.Nil(h.Process(NewServiceScheduler, eval), "Process") @@ -785,12 +785,12 @@ func 
TestServiceSched_JobRegister_Annotate(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -802,7 +802,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { AnnotatePlan: true, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -828,7 +828,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -864,13 +864,13 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job and set the task group count to zero. job := mock.Job() job.TaskGroups[0].Count = 0 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -881,7 +881,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -897,7 +897,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure no allocations placed if len(out) != 0 { @@ -913,7 +913,7 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { // Create NO nodes // Create a job job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -925,7 +925,7 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -992,17 +992,17 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { }, } node.ComputeClass() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create an ineligible node node2 := mock.Node() node2.Attributes["kernel.name"] = "windows" node2.ComputeClass() - noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2)) // Create a jobs job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), 
job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1014,7 +1014,7 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1085,8 +1085,8 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { // Create one node node := mock.Node() node.NodeClass = "class_0" - noErr(t, node.ComputeClass()) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, node.ComputeClass()) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create a job that constrains on a node class job := mock.Job() @@ -1102,7 +1102,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { tg2.Name = "web2" tg2.Constraints[1].RTarget = "class_1" job.TaskGroups = append(job.TaskGroups, tg2) - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1113,7 +1113,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) if err != nil { @@ -1138,7 +1138,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { // Ensure two allocations placed ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) if len(out) != 2 { t.Fatalf("bad: %#v", out) } @@ -1179,7 +1179,7 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { // Create a job and set the task group count to zero. job := mock.Job() job.TaskGroups[0].Count = 0 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1192,7 +1192,7 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { } // Insert it into the state store - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1213,14 +1213,14 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { // Create a node node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create a job with a high resource ask so that all the allocations can't // be placed on a single node. 
job := mock.Job() job.TaskGroups[0].Count = 3 job.TaskGroups[0].Tasks[0].Resources.CPU = 3600 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -1232,7 +1232,7 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1263,7 +1263,7 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure only one allocations placed if len(out) != 1 { @@ -1283,7 +1283,7 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { // Create a job job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1296,7 +1296,7 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { } // Insert it into the state store - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1329,12 +1329,12 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job and set the task group count to zero. 
job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock blocked evaluation eval := &structs.Evaluation{ @@ -1347,7 +1347,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { } // Insert it into the state store - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1386,7 +1386,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ -1415,12 +1415,12 @@ func TestServiceSched_JobModify(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -1431,7 +1431,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -1444,7 +1444,7 @@ func TestServiceSched_JobModify(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) // Update the job job2 := mock.Job() @@ -1452,7 +1452,7 @@ func TestServiceSched_JobModify(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1463,7 +1463,7 @@ func TestServiceSched_JobModify(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1498,7 +1498,7 @@ func TestServiceSched_JobModify(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -1518,13 +1518,13 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { // Create one node node := mock.Node() node.NodeResources.Cpu.CpuShares = 1000 - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Generate a fake job with one allocation job := mock.Job() job.TaskGroups[0].Tasks[0].Resources.CPU = 256 job2 := job.Copy() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + 
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation alloc := mock.Alloc() @@ -1534,11 +1534,11 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256 allocs = append(allocs, alloc) - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Update the job to count 3 job2.TaskGroups[0].Count = 3 - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1549,7 +1549,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1593,7 +1593,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -1612,12 +1612,12 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -1628,7 +1628,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Add a few terminal status allocations, these should be ignored var terminal []*structs.Allocation @@ -1641,13 +1641,13 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { alloc.DesiredStatus = structs.AllocDesiredStatusStop terminal = append(terminal, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) // Update the job to be count zero job2 := mock.Job() job2.ID = job.ID job2.TaskGroups[0].Count = 0 - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1658,7 +1658,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1693,7 +1693,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed out, _ = 
structs.FilterTerminalAllocs(out) @@ -1712,12 +1712,12 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -1728,7 +1728,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -1743,7 +1743,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1754,7 +1754,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1816,7 +1816,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { // Create a node and clear the reserved resources node := mock.Node() node.ReservedResources = nil - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create a resource ask that is the same as the resources available on the // node @@ -1844,7 +1844,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { job := mock.Job() job.TaskGroups[0].Count = 1 job.TaskGroups[0].Tasks[0].Resources = request - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) alloc := mock.Alloc() alloc.AllocatedResources = allocated @@ -1852,7 +1852,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) // Update the job to place more versions of the task group, drop the count // and force destructive updates @@ -1868,7 +1868,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) eval := &structs.Evaluation{ Namespace: structs.DefaultNamespace, @@ -1878,7 +1878,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -1938,12 +1938,12 @@ func 
TestServiceSched_JobModify_Canaries(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -1954,7 +1954,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -1970,7 +1970,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { // Update the task, such that it cannot be done in-place job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -1981,7 +1981,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2051,15 +2051,15 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations and create an older deployment job := mock.Job() d := mock.Deployment() d.JobID = job.ID - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) - noErr(t, h.State.UpsertDeployment(h.NextIndex(), d)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d)) // Create allocs that are part of the old deployment var allocs []*structs.Allocation @@ -2073,7 +2073,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Update the job job2 := mock.Job() @@ -2085,7 +2085,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { MinHealthyTime: 10 * time.Second, HealthyDeadline: 10 * time.Minute, } - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2096,7 +2096,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2136,7 +2136,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) 
// Ensure all allocations placed if len(out) != 10 { @@ -2178,12 +2178,12 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { // Create node node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Generate a fake job with 0.8 allocations job := mock.Job() job.TaskGroups[0].Count = 1 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create 0.8 alloc alloc := mock.Alloc() @@ -2191,13 +2191,13 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.AllocatedResources = nil // 0.8 didn't have this - noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) // Update the job inplace job2 := job.Copy() job2.TaskGroups[0].Tasks[0].Services[0].Tags[0] = "newtag" - noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2208,7 +2208,7 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2238,7 +2238,7 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed require.Len(t, out, 1) @@ -2259,7 +2259,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { node := mock.Node() node.Meta["rack"] = fmt.Sprintf("rack%d", i) nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job that uses distinct property and has count higher than what is @@ -2271,7 +2271,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { Operand: structs.ConstraintDistinctProperty, LTarget: "${meta.rack}", }) - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) oldJob := job.Copy() oldJob.JobModifyIndex -= 1 @@ -2287,7 +2287,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -2298,7 +2298,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2340,7 +2340,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed if len(out) != 10 { @@ 
-2375,7 +2375,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { for _, alloc := range allocs { h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Create a mock evaluation to deregister the job eval := &structs.Evaluation{ @@ -2386,7 +2386,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2408,7 +2408,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure that the job field on the allocation is still populated for _, alloc := range out { @@ -2503,11 +2503,11 @@ func TestServiceSched_NodeDown(t *testing.T) { // Register a node node := mock.Node() node.Status = structs.NodeStatusDown - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Generate a fake job with allocations and an update policy. job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2548,7 +2548,7 @@ func TestServiceSched_NodeDown(t *testing.T) { toBeLost[allocs[i].ID] = true } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2560,7 +2560,7 @@ func TestServiceSched_NodeDown(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2598,11 +2598,11 @@ func TestServiceSched_NodeUpdate(t *testing.T) { // Register a node node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Generate a fake job with allocations and an update policy. 
job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2613,14 +2613,14 @@ func TestServiceSched_NodeUpdate(t *testing.T) { alloc.Name = fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Mark some allocs as running ws := memdb.NewWatchSet() for i := 0; i < 4; i++ { out, _ := h.State.AllocByID(ws, allocs[i].ID) out.ClientStatus = structs.AllocClientStatusRunning - noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out})) + require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out})) } // Create a mock evaluation which won't trigger any new placements @@ -2633,7 +2633,7 @@ func TestServiceSched_NodeUpdate(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2653,17 +2653,17 @@ func TestServiceSched_NodeDrain(t *testing.T) { // Register a draining node node := mock.Node() node.Drain = true - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2675,7 +2675,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2687,7 +2687,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2718,7 +2718,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure all allocations placed out, _ = structs.FilterTerminalAllocs(out) @@ -2736,11 +2736,11 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { node := mock.Node() node.Drain = true node.Status = structs.NodeStatusDown - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Generate a fake job with allocations job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 10; i++ { @@ -2751,7 +2751,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { alloc.Name = 
fmt.Sprintf("my-job.web[%d]", i) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Set the desired state of the allocs to stop var stop []*structs.Allocation @@ -2761,7 +2761,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.DesiredTransition.Migrate = helper.BoolToPtr(true) stop = append(stop, newAlloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), stop)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), stop)) // Mark some of the allocations as running var running []*structs.Allocation @@ -2770,7 +2770,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusRunning running = append(running, newAlloc) } - noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running)) + require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running)) // Mark some of the allocations as complete var complete []*structs.Allocation @@ -2789,7 +2789,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { newAlloc.ClientStatus = structs.AllocClientStatusComplete complete = append(complete, newAlloc) } - noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete)) + require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete)) // Create a mock evaluation to deal with the node update eval := &structs.Evaluation{ @@ -2802,7 +2802,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2847,12 +2847,12 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { // Register a draining node node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Generate a fake job with allocations and an update policy. 
job := mock.Job() job.TaskGroups[0].Count = 2 - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -2864,10 +2864,10 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) allocs = append(allocs, alloc) } - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) node.Drain = true - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) // Create a mock evaluation to deal with drain eval := &structs.Evaluation{ @@ -2879,7 +2879,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { NodeID: node.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2900,12 +2900,12 @@ func TestServiceSched_RetryLimit(t *testing.T) { // Create some nodes for i := 0; i < 10; i++ { node := mock.Node() - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Create a job job := mock.Job() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) // Create a mock evaluation to register the job eval := &structs.Evaluation{ @@ -2916,7 +2916,7 @@ func TestServiceSched_RetryLimit(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -2932,7 +2932,7 @@ func TestServiceSched_RetryLimit(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Ensure no allocations placed if len(out) != 0 { @@ -2951,7 +2951,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(t, h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. 
@@ -2967,7 +2967,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { tgName := job.TaskGroups[0].Name now := time.Now() - noErr(t, h.State.UpsertJob(h.NextIndex(), job)) + require.NoError(t, h.State.UpsertJob(h.NextIndex(), job)) var allocs []*structs.Allocation for i := 0; i < 2; i++ { @@ -2986,7 +2986,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { failedAllocID := allocs[1].ID successAllocID := allocs[0].ID - noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) // Create a mock evaluation eval := &structs.Evaluation{ @@ -2997,7 +2997,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err := h.Process(NewServiceScheduler, eval) @@ -3013,7 +3013,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Lookup the allocations by JobID ws := memdb.NewWatchSet() out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) // Verify that one new allocation got created with its restart tracker info assert := assert.New(t) @@ -3031,7 +3031,7 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Mark this alloc as failed again, should not get rescheduled newAlloc.ClientStatus = structs.AllocClientStatusFailed - noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc})) + require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc})) // Create another mock evaluation eval = &structs.Evaluation{ @@ -3042,14 +3042,14 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { JobID: job.ID, Status: structs.EvalStatusPending, } - noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) + require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) // Process the evaluation err = h.Process(NewServiceScheduler, eval) assert.Nil(err) // Verify no new allocs were created this time out, err = h.State.AllocsByJob(ws, job.Namespace, job.ID, false) - noErr(t, err) + require.NoError(t, err) assert.Equal(3, len(out)) } @@ -3063,7 +3063,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { for i := 0; i < 10; i++ { node := mock.Node() nodes = append(nodes, node) - noErr(t, h.State.UpsertNode(h.NextIndex(), node)) + require.NoError(h.State.UpsertNode(h.NextIndex(), node)) } // Generate a fake job with allocations and an update policy. 
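Note: unlike the rest of this diff, the TestServiceSched_Reschedule_Later hunks here (and the TestDeployment_FailedAllocs_Reschedule hunk further down) call require.NoError without passing t. That is the method form on a per-test testify assertion object, which presumes the test body opens with the standard pattern sketched below; the setup line sits outside this diff's context, so this is an assumption about the surrounding code:

    require := require.New(t) // per-test instance; later assertions omit t
    require.NoError(h.State.UpsertNode(h.NextIndex(), node))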
@@ -3080,7 +3080,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) {
 	tgName := job.TaskGroups[0].Name
 	now := time.Now()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for i := 0; i < 2; i++ {
@@ -3098,7 +3098,7 @@ func TestServiceSched_Reschedule_Later(t *testing.T) {
 		FinishedAt: now}}
 	failedAllocID := allocs[1].ID
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation
 	eval := &structs.Evaluation{
@@ -3109,7 +3109,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -3125,7 +3125,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(err)
 	// Verify no new allocs were created
 	require.Equal(2, len(out))
@@ -3151,7 +3151,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	maxRestartAttempts := 3
@@ -3167,7 +3167,7 @@
 	tgName := job.TaskGroups[0].Name
 	now := time.Now()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for i := 0; i < 2; i++ {
@@ -3185,7 +3185,7 @@
 		StartedAt: now.Add(-1 * time.Hour),
 		FinishedAt: now.Add(-10 * time.Second)}}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation
 	eval := &structs.Evaluation{
@@ -3196,7 +3196,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	expectedNumAllocs := 3
 	expectedNumReschedTrackers := 1
@@ -3208,7 +3208,7 @@
 	for i := 0; i < maxRestartAttempts; i++ {
 		// Process the evaluation
 		err := h.Process(NewServiceScheduler, eval)
-		noErr(t, err)
+		require.NoError(t, err)
 		// Ensure multiple plans
 		if len(h.Plans) == 0 {
@@ -3218,7 +3218,7 @@
 		// Lookup the allocations by JobID
 		ws := memdb.NewWatchSet()
 		out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-		noErr(t, err)
+		require.NoError(t, err)
 		// Verify that a new allocation got created with its restart tracker info
 		assert.Equal(expectedNumAllocs, len(out))
@@ -3256,7 +3256,7 @@
 		failedAllocId = newAlloc.ID
 		failedNodeID = newAlloc.NodeID
-		noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc}))
+		require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc}))
 		// Create another mock evaluation
 		eval = &structs.Evaluation{
@@ -3267,7 +3267,7 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) {
 			JobID:   job.ID,
 			Status:  structs.EvalStatusPending,
 		}
-		noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+		require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 		expectedNumAllocs += 1
 		expectedNumReschedTrackers += 1
 	}
@@ -3279,7 +3279,7 @@
 	// Verify no new allocs were created because restart attempts were exhausted
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	assert.Equal(5, len(out)) // 2 original, plus 3 reschedule attempts
 }
@@ -3292,7 +3292,7 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations and an update policy.
@@ -3304,7 +3304,7 @@
 		Delay:     5 * time.Second,
 		Unlimited: true,
 	}
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for i := 0; i < 2; i++ {
@@ -3361,7 +3361,7 @@
 	failedAllocID := allocs[1].ID
 	successAllocID := allocs[0].ID
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation
 	eval := &structs.Evaluation{
@@ -3372,7 +3372,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -3388,7 +3388,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Verify that one new allocation got created with its restart tracker info
 	assert := assert.New(t)
@@ -3425,7 +3425,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations and a reschedule policy.
@@ -3506,13 +3506,13 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) {
 	// Create a node
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a complete alloc
 	alloc := mock.Alloc()
@@ -3521,7 +3521,7 @@
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
 	alloc.ClientStatus = structs.AllocClientStatusComplete
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -3532,7 +3532,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3548,7 +3548,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure no allocations placed
 	if len(out) != 1 {
@@ -3563,13 +3563,13 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) {
 	// Create a node
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	tgName := job.TaskGroups[0].Name
 	now := time.Now()
@@ -3584,7 +3584,7 @@
 	alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
 		StartedAt: now.Add(-1 * time.Hour),
 		FinishedAt: now.Add(-10 * time.Second)}}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -3595,7 +3595,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3611,7 +3611,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure a replacement alloc was placed.
 	if len(out) != 2 {
@@ -3633,14 +3633,14 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) {
 	// Create a node
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a job
 	job := mock.Job()
 	job.ID = "my-job"
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 3
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Desired = 3
 	// Mark one as lost and then schedule
@@ -3667,7 +3667,7 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) {
 	alloc.DesiredStatus = structs.AllocDesiredStatusStop
 	alloc.ClientStatus = structs.AllocClientStatusComplete
 	allocs = append(allocs, alloc)
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -3678,7 +3678,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3694,7 +3694,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure a replacement alloc was placed.
 	if len(out) != 4 {
@@ -3721,13 +3721,13 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
 	node := mock.Node()
 	node.Drain = true
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	tgName := job.TaskGroups[0].Name
 	now := time.Now()
@@ -3742,7 +3742,7 @@
 	alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
 		StartedAt: now.Add(-1 * time.Hour),
 		FinishedAt: now.Add(-10 * time.Second)}}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -3753,7 +3753,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3777,14 +3777,14 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
 	node := mock.Node()
 	node.Drain = true
 	node2 := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a successful alloc
 	alloc := mock.Alloc()
@@ -3804,7 +3804,7 @@
 			},
 		},
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to rerun the job
 	eval := &structs.Evaluation{
@@ -3815,7 +3815,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3831,7 +3831,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure no replacement alloc was placed.
 	if len(out) != 1 {
@@ -3851,13 +3851,13 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for i := 0; i < 10; i++ {
@@ -3869,7 +3869,7 @@
 		alloc.ClientStatus = structs.AllocClientStatusComplete
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation to trigger the job
 	eval := &structs.Evaluation{
@@ -3880,7 +3880,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3903,13 +3903,13 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for i := 0; i < 10; i++ {
@@ -3921,7 +3921,7 @@
 		alloc.ClientStatus = structs.AllocClientStatusComplete
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Update the job
 	job2 := mock.Job()
@@ -3929,7 +3929,7 @@
 	job2.Type = structs.JobTypeBatch
 	job2.Version++
 	job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
 	allocs = nil
 	for i := 0; i < 10; i++ {
@@ -3952,7 +3952,7 @@
 		}
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -3963,7 +3963,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -3987,14 +3987,14 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) {
 	node := mock.Node()
 	node.Drain = true
 	node2 := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a running alloc
 	alloc := mock.Alloc()
@@ -4003,13 +4003,13 @@
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
 	alloc.ClientStatus = structs.AllocClientStatusRunning
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create an update job
 	job2 := job.Copy()
 	job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
 	job2.Version++
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -4021,7 +4021,7 @@
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -4059,14 +4059,14 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) {
 	node := mock.Node()
 	node.Drain = true
 	node2 := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a complete alloc
 	alloc := mock.Alloc()
@@ -4085,7 +4085,7 @@
 			},
 		},
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -4097,7 +4097,7 @@
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -4121,13 +4121,13 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) {
 	// Create a node
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a job
 	job := mock.Job()
 	job.Type = structs.JobTypeBatch
 	job.TaskGroups[0].Count = 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	scoreMetric := &structs.AllocMetric{
 		NodesEvaluated: 10,
@@ -4153,12 +4153,12 @@
 		alloc.Metrics = scoreMetric
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Update the job's modify index to force an inplace upgrade
 	updatedJob := job.Copy()
 	updatedJob.JobModifyIndex = job.JobModifyIndex + 1
-	noErr(t, h.State.UpsertJob(h.NextIndex(), updatedJob))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), updatedJob))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -4170,7 +4170,7 @@
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewBatchScheduler, eval)
@@ -4204,12 +4204,12 @@ func TestGenericSched_ChainedAlloc(t *testing.T) {
 	// Create some nodes
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a job
 	job := mock.Job()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -4220,7 +4220,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	if err := h.Process(NewServiceScheduler, eval); err != nil {
 		t.Fatalf("err: %v", err)
@@ -4240,7 +4240,7 @@
 	job1.ID = job.ID
 	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
 	job1.TaskGroups[0].Count = 12
-	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))
+	require.NoError(t, h1.State.UpsertJob(h1.NextIndex(), job1))
 	// Create a mock evaluation to update the job
 	eval1 := &structs.Evaluation{
@@ -4251,7 +4251,7 @@
 		JobID:   job1.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
 	// Process the evaluation
 	if err := h1.Process(NewServiceScheduler, eval1); err != nil {
@@ -4293,7 +4293,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
 	// Register a draining node
 	node := mock.Node()
 	node.Drain = true
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create an alloc on the draining node
 	alloc := mock.Alloc()
@@ -4302,8 +4302,8 @@
 	alloc.Job.TaskGroups[0].Count = 1
 	alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true
 	alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
-	noErr(t, h.State.UpsertJob(h.NextIndex(), alloc.Job))
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), alloc.Job))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -4316,7 +4316,7 @@
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -4357,14 +4357,14 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
 	job.JobModifyIndex = job.CreateIndex + 1
 	job.ModifyIndex = job.CreateIndex + 1
 	job.Stop = true
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a deployment
 	d := mock.Deployment()
 	d.JobID = job.ID
 	d.JobCreateIndex = job.CreateIndex
 	d.JobModifyIndex = job.JobModifyIndex - 1
-	noErr(t, h.State.UpsertDeployment(h.NextIndex(), d))
+	require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
 	// Create a mock evaluation to deregister the job
 	eval := &structs.Evaluation{
@@ -4376,7 +4376,7 @@
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -4393,7 +4393,7 @@
 	// Ensure the plan cancelled the existing deployment
 	ws := memdb.NewWatchSet()
 	out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
-	noErr(t, err)
+	require.NoError(t, err)
 	if out == nil {
 		t.Fatalf("No deployment for job")
@@ -4428,15 +4428,15 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
 	// Generate a fake job
 	job := mock.Job()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a deployment for an old version of the job
 	d := mock.Deployment()
 	d.JobID = job.ID
-	noErr(t, h.State.UpsertDeployment(h.NextIndex(), d))
+	require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
 	// Upsert again to bump job version
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to kick the job
 	eval := &structs.Evaluation{
@@ -4448,7 +4448,7 @@
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -4465,7 +4465,7 @@
 	// Ensure the plan cancelled the existing deployment
 	ws := memdb.NewWatchSet()
 	out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
-	noErr(t, err)
+	require.NoError(t, err)
 	if out == nil {
 		t.Fatalf("No deployment for job")
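Reviewer note on the two `require.NoError` call shapes in the hunks above: tests that declare a local `require := require.New(t)` (e.g. `TestServiceSched_Reschedule_Later` and `TestDeployment_FailedAllocs_Reschedule`) use the method form with no explicit `*testing.T`, while the rest call the package-level form. Both compile because the local variable only shadows the package from its declaration onward. A minimal sketch; `doWork` is a hypothetical stand-in for the state-store calls:

```go
package scheduler_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// doWork is hypothetical; it stands in for calls like h.State.UpsertJob
// so the sketch is self-contained.
func doWork() error { return nil }

func TestRequireCallStyles(t *testing.T) {
	// Package-level form: the *testing.T is passed explicitly.
	require.NoError(t, doWork())

	// Instance form: from here on, the local variable shadows the package,
	// so assertions drop the explicit t.
	require := require.New(t)
	require.NoError(doWork())
}
```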
diff --git a/scheduler/rank_test.go b/scheduler/rank_test.go
index 862698ef178f..2cae33087f68 100644
--- a/scheduler/rank_test.go
+++ b/scheduler/rank_test.go
@@ -551,9 +551,9 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
 		ClientStatus: structs.AllocClientStatusPending,
 		TaskGroup:    "web",
 	}
-	noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
-	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
-	noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
+	require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
+	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
+	require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
 	taskGroup := &structs.TaskGroup{
 		EphemeralDisk: &structs.EphemeralDisk{},
@@ -666,9 +666,9 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
 		ClientStatus: structs.AllocClientStatusPending,
 		TaskGroup:    "web",
 	}
-	noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
-	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
-	noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
+	require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
+	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
+	require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
 	// Add a planned eviction to alloc1
 	plan := ctx.Plan()
diff --git a/scheduler/system_sched_test.go b/scheduler/system_sched_test.go
index 84e3ee037c64..af0015730a29 100644
--- a/scheduler/system_sched_test.go
+++ b/scheduler/system_sched_test.go
@@ -21,12 +21,12 @@ func TestSystemSched_JobRegister(t *testing.T) {
 	// Create some nodes
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a job
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to deregister the job
 	eval := &structs.Evaluation{
@@ -37,7 +37,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -68,7 +68,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure all allocations placed
 	if len(out) != 10 {
@@ -95,13 +95,13 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
 	// Create some nodes
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a job
 	job := mock.SystemJob()
 	job.TaskGroups[0].EphemeralDisk.Sticky = true
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -112,7 +112,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	if err := h.Process(NewSystemScheduler, eval); err != nil {
@@ -132,7 +132,7 @@
 	// Get an allocation and mark it as failed
 	alloc := planned[4].Copy()
 	alloc.ClientStatus = structs.AllocClientStatusFailed
-	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to handle the update
 	eval = &structs.Evaluation{
@@ -143,7 +143,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	h1 := NewHarnessWithState(t, h.State)
 	if err := h1.Process(NewSystemScheduler, eval); err != nil {
 		t.Fatalf("err: %v", err)
@@ -170,18 +170,18 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
 	// Create a nodes
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a job
 	job := mock.SystemJob()
 	job.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create another job with a lot of disk resource ask so that it doesn't fit
 	// the node
 	job1 := mock.SystemJob()
 	job1.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job1))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job1))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -192,7 +192,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	if err := h.Process(NewSystemScheduler, eval); err != nil {
@@ -202,7 +202,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure all allocations placed
 	if len(out) != 1 {
@@ -220,7 +220,7 @@
 		JobID:   job1.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
 	// Process the evaluation
 	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
@@ -228,7 +228,7 @@
 	}
 	out, err = h1.State.AllocsByJob(ws, job.Namespace, job1.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	if len(out) != 0 {
 		t.Fatalf("bad: %#v", out)
 	}
@@ -239,7 +239,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
 	// Create a nodes
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Enable Preemption
 	h.State.SchedulerSetConfig(h.NextIndex(), &structs.SchedulerConfiguration{
@@ -252,7 +252,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
 	svcJob := mock.Job()
 	svcJob.TaskGroups[0].Count = 1
 	svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 3600
-	noErr(t, h.State.UpsertJob(h.NextIndex(), svcJob))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), svcJob))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -263,7 +263,7 @@
 		JobID:   svcJob.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
 	if err != nil {
@@ -272,7 +272,7 @@
 	// Create a system job
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to register the job
 	eval1 := &structs.Evaluation{
@@ -283,7 +283,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
 	// Process the evaluation
 	if err := h.Process(NewSystemScheduler, eval1); err != nil {
 		t.Fatalf("err: %v", err)
@@ -324,7 +324,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
 			node.NodeClass = "bar"
 		}
 		node.ComputeClass()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a job constraining on node class
@@ -335,7 +335,7 @@
 		Operand: "==",
 	}
 	job.Constraints = append(job.Constraints, fooConstraint)
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to deregister the job
 	eval := &structs.Evaluation{
@@ -347,7 +347,7 @@
 		AnnotatePlan: true,
 		Status:       structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -373,7 +373,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure all allocations placed
 	if len(out) != 9 {
@@ -416,12 +416,12 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for _, node := range nodes {
@@ -432,11 +432,11 @@
 		alloc.Name = "my-job.web[0]"
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Add a new node.
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Create a mock evaluation to deal with the node update
 	eval := &structs.Evaluation{
@@ -447,7 +447,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
 	if err != nil {
@@ -487,7 +487,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure all allocations placed
 	out, _ = structs.FilterTerminalAllocs(out)
@@ -504,7 +504,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
 	// Create NO nodes
 	// Create a job
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -515,7 +515,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
 	if err != nil {
@@ -538,12 +538,12 @@ func TestSystemSched_JobModify(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for _, node := range nodes {
@@ -554,7 +554,7 @@
 		alloc.Name = "my-job.web[0]"
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Add a few terminal status allocations, these should be ignored
 	var terminal []*structs.Allocation
@@ -567,7 +567,7 @@
 		alloc.DesiredStatus = structs.AllocDesiredStatusStop
 		terminal = append(terminal, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
 	// Update the job
 	job2 := mock.SystemJob()
@@ -575,7 +575,7 @@
 	// Update the task, such that it cannot be done in-place
 	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -586,7 +586,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -621,7 +621,7 @@ func TestSystemSched_JobModify(t *testing.T) {
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure all allocations placed
 	out, _ = structs.FilterTerminalAllocs(out)
@@ -640,12 +640,12 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for _, node := range nodes {
@@ -656,7 +656,7 @@
 		alloc.Name = "my-job.web[0]"
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Update the job
 	job2 := mock.SystemJob()
@@ -668,7 +668,7 @@
 	// Update the task, such that it cannot be done in-place
 	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -679,7 +679,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
 	if err != nil {
@@ -743,12 +743,12 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for _, node := range nodes {
@@ -759,12 +759,12 @@
 		alloc.Name = "my-job.web[0]"
 		allocs = append(allocs, alloc)
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Update the job
 	job2 := mock.SystemJob()
 	job2.ID = job.ID
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -775,7 +775,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -815,7 +815,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure all allocations placed
 	if len(out) != 10 {
@@ -842,7 +842,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
@@ -858,9 +858,9 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
 		allocs = append(allocs, alloc)
 	}
 	for _, alloc := range allocs {
-		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
+		require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation to deregister the job
 	eval := &structs.Evaluation{
@@ -871,7 +871,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -895,7 +895,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure no remaining allocations
 	out, _ = structs.FilterTerminalAllocs(out)
@@ -914,13 +914,13 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
 		nodes = append(nodes, node)
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Generate a fake job with allocations
 	job := mock.SystemJob()
 	job.Stop = true
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	var allocs []*structs.Allocation
 	for _, node := range nodes {
@@ -932,9 +932,9 @@
 		allocs = append(allocs, alloc)
 	}
 	for _, alloc := range allocs {
-		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
+		require.NoError(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
 	// Create a mock evaluation to deregister the job
 	eval := &structs.Evaluation{
@@ -945,7 +945,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -969,7 +969,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure no remaining allocations
 	out, _ = structs.FilterTerminalAllocs(out)
@@ -986,11 +986,11 @@ func TestSystemSched_NodeDown(t *testing.T) {
 	// Register a down node
 	node := mock.Node()
 	node.Status = structs.NodeStatusDown
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Generate a fake job allocated on that node.
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	alloc := mock.Alloc()
 	alloc.Job = job
@@ -998,7 +998,7 @@
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
 	alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -1010,7 +1010,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1054,18 +1054,18 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) {
 	node := mock.Node()
 	node.Drain = true
 	node.Status = structs.NodeStatusDown
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Generate a fake job allocated on that node.
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	alloc := mock.Alloc()
 	alloc.Job = job
 	alloc.JobID = job.ID
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to deal with the node update
 	eval := &structs.Evaluation{
@@ -1077,7 +1077,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewServiceScheduler, eval)
@@ -1115,11 +1115,11 @@ func TestSystemSched_NodeDrain(t *testing.T) {
 	// Register a draining node
 	node := mock.Node()
 	node.Drain = true
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Generate a fake job allocated on that node.
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	alloc := mock.Alloc()
 	alloc.Job = job
@@ -1127,7 +1127,7 @@
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
 	alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -1139,7 +1139,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1181,18 +1181,18 @@ func TestSystemSched_NodeUpdate(t *testing.T) {
 	// Register a node
 	node := mock.Node()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Generate a fake job allocated on that node.
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	alloc := mock.Alloc()
 	alloc.Job = job
 	alloc.JobID = job.ID
 	alloc.NodeID = node.ID
 	alloc.Name = "my-job.web[0]"
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
 	// Create a mock evaluation to deal
 	eval := &structs.Evaluation{
@@ -1204,7 +1204,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1227,12 +1227,12 @@ func TestSystemSched_RetryLimit(t *testing.T) {
 	// Create some nodes
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a job
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to deregister the job
 	eval := &structs.Evaluation{
@@ -1243,7 +1243,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1259,7 +1259,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(t, err)
 	// Ensure no allocations placed
 	if len(out) != 0 {
@@ -1279,11 +1279,11 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) {
 	// Register a node
 	node := mock.Node()
 	node.Attributes["kernel.name"] = "darwin"
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	// Generate a system job which can't be placed on the node
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to deal
 	eval := &structs.Evaluation{
@@ -1295,7 +1295,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1388,12 +1388,12 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
 	// Create some nodes
 	for i := 0; i < 10; i++ {
 		node := mock.Node()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a job
 	job := mock.SystemJob()
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -1404,7 +1404,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	if err := h.Process(NewSystemScheduler, eval); err != nil {
 		t.Fatalf("err: %v", err)
@@ -1424,12 +1424,12 @@
 	job1.ID = job.ID
 	job1.TaskGroups[0].Tasks[0].Env = make(map[string]string)
 	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
-	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))
+	require.NoError(t, h1.State.UpsertJob(h1.NextIndex(), job1))
 	// Insert two more nodes
 	for i := 0; i < 2; i++ {
 		node := mock.Node()
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 	// Create a mock evaluation to update the job
@@ -1441,7 +1441,7 @@
 		JobID:   job1.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
 	// Process the evaluation
 	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
 		t.Fatalf("err: %v", err)
@@ -1484,12 +1484,12 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
 	node.NodeClass = "green"
 	node.Drain = true
 	node.ComputeClass()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	node2 := mock.Node()
 	node2.NodeClass = "blue"
 	node2.ComputeClass()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
 	// Create a Job with two task groups, each constrained on node class
 	job := mock.SystemJob()
@@ -1505,7 +1505,7 @@
 	tg2.Name = "web2"
 	tg2.Constraints[0].RTarget = "blue"
 	job.TaskGroups = append(job.TaskGroups, tg2)
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create an allocation on each node
 	alloc := mock.Alloc()
@@ -1522,7 +1522,7 @@
 	alloc2.NodeID = node2.ID
 	alloc2.Name = "my-job.web2[0]"
 	alloc2.TaskGroup = "web2"
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2}))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -1534,7 +1534,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1574,12 +1574,12 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
 	node := mock.Node()
 	node.NodeClass = "green"
 	node.ComputeClass()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 	node2 := mock.Node()
 	node2.NodeClass = "blue"
 	node2.ComputeClass()
-	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
+	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
 	// Create a Job with two task groups, each constrained on node class
 	job := mock.SystemJob()
@@ -1595,7 +1595,7 @@
 	tg2.Name = "web2"
 	tg2.Constraints[0].RTarget = "blue"
 	job.TaskGroups = append(job.TaskGroups, tg2)
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to deal with drain
 	eval := &structs.Evaluation{
@@ -1607,7 +1607,7 @@
 		NodeID:  node.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1666,7 +1666,7 @@ func TestSystemSched_Preemption(t *testing.T) {
 				},
 			},
 		}
-		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
 		nodes = append(nodes, node)
 	}
@@ -1728,7 +1728,7 @@
 		},
 	}
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job1))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job1))
 	job2 := mock.BatchJob()
 	job2.Type = structs.JobTypeBatch
@@ -1771,7 +1771,7 @@
 			DiskMB: 5 * 1024,
 		},
 	}
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
 	job3 := mock.Job()
 	job3.Type = structs.JobTypeBatch
@@ -1816,7 +1816,7 @@
 			DiskMB: 5 * 1024,
 		},
 	}
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3}))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc1, alloc2, alloc3}))
 	// Create a high priority job and allocs for it
 	// These allocs should not be preempted
@@ -1863,8 +1863,8 @@
 			DiskMB: 2 * 1024,
 		},
 	}
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job4))
-	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc4}))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job4))
+	require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc4}))
 	// Create a system job such that it would need to preempt both allocs to succeed
 	job := mock.SystemJob()
@@ -1878,7 +1878,7 @@
 			},
 		},
 	}
-	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
 	// Create a mock evaluation to register the job
 	eval := &structs.Evaluation{
@@ -1889,7 +1889,7 @@
 		JobID:   job.ID,
 		Status:  structs.EvalStatusPending,
 	}
-	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
+	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
 	// Process the evaluation
 	err := h.Process(NewSystemScheduler, eval)
@@ -1921,7 +1921,7 @@
 	// Lookup the allocations by JobID
 	ws := memdb.NewWatchSet()
 	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
-	noErr(t, err)
+	require.NoError(err)
 	// Ensure all allocations placed
 	require.Equal(2, len(out))
@@ -1945,7 +1945,7 @@
 	for _, jobId := range expectedPreemptedJobIDs {
 		out, err = h.State.AllocsByJob(ws, structs.DefaultNamespace, jobId, false)
-		noErr(t, err)
+		require.NoError(err)
 		for _, alloc := range out {
 			require.Equal(structs.AllocDesiredStatusEvict, alloc.DesiredStatus)
 			require.Equal(fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocId), alloc.DesiredDescription)
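The next file's hunk deletes the old `noErr` helper outright. For reviewers checking that this is behavior-preserving, a minimal sketch of the equivalence; the `requireStyle` wrapper is hypothetical and exists only to show the replacement call side by side:

```go
package scheduler

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// The removed helper, reproduced for reference: it aborted the running
// test on the first error via t.Fatalf, i.e. fail-fast semantics.
func noErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}

// requireStyle shows the replacement: require.NoError reports the error
// and calls t.FailNow on failure, so the test still stops at the first
// failed setup step (assert.NoError, by contrast, records the failure
// and lets the test keep running).
func requireStyle(t *testing.T, err error) {
	require.NoError(t, err)
}
```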
diff --git a/scheduler/util_test.go b/scheduler/util_test.go
index 8114fa600ed9..73534a6d9aaf 100644
--- a/scheduler/util_test.go
+++ b/scheduler/util_test.go
@@ -15,13 +15,6 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
-// noErr is used to assert there are no errors
-func noErr(t *testing.T, err error) {
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-}
-
 func TestMaterializeTaskGroups(t *testing.T) {
 	job := mock.Job()
 	index := materializeTaskGroups(job)
diff --git a/website/source/guides/upgrade/upgrade-specific.html.md b/website/source/guides/upgrade/upgrade-specific.html.md
index e626a37c2dc0..bdd2d7aee23f 100644
--- a/website/source/guides/upgrade/upgrade-specific.html.md
+++ b/website/source/guides/upgrade/upgrade-specific.html.md
@@ -17,6 +17,14 @@ standard upgrade flow.
 
 ## Nomad 0.10.2
 
+### Preemption Panic Fixed
+
+Nomad 0.10.2 fixes a [server crashing bug][gh-6787] present in scheduler
+preemption since 0.9.0. Users unable to immediately upgrade to Nomad 0.10.2 can
+[disable preemption][preemption-api] to avoid the panic.
+
+### Dangling Docker Container Cleanup
+
 Nomad 0.10.2 addresses an issue occurring in heavily loaded clients, where
 containers are started without being properly managed by Nomad. Nomad 0.10.2
 introduced a reaper that detects and kills such containers.
@@ -375,12 +383,14 @@ deleted and then Nomad 0.3.0 can be launched.
 [drain-api]: /api/nodes.html#drain-node
 [drain-cli]: /docs/commands/node/drain.html
 [dangling-containers]: /docs/drivers/docker.html#dangling-containers
+[gh-6787]: https://github.com/hashicorp/nomad/issues/6787
 [hcl2]: https://github.com/hashicorp/hcl2
 [lxc]: /docs/drivers/external/lxc.html
 [migrate]: /docs/job-specification/migrate.html
 [plugins]: /docs/drivers/external/index.html
 [plugin-stanza]: /docs/configuration/plugin.html
 [preemption]: /docs/internals/scheduling/preemption.html
+[preemption-api]: /api/operator.html#update-scheduler-configuration
 [task-config]: /docs/job-specification/task.html#config
 [validate]: /docs/commands/job/validate.html
 [update]: /docs/job-specification/update.html
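For operators applying the workaround named in the upgrade note, preemption is toggled through the operator scheduler-configuration endpoint. A minimal sketch using the official Go client; the `api` package field and method names shown (`PreemptionConfig`, `SchedulerSetConfiguration`) are assumed from the 0.9/0.10-era client and should be verified against your vendored copy:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig honors NOMAD_ADDR and related environment variables.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Disable preemption for all scheduler types until the cluster is on
	// 0.10.2. System-job preemption is the one enabled by default since
	// 0.9.0, so SystemSchedulerEnabled is the field that matters most here.
	_, _, err = client.Operator().SchedulerSetConfiguration(&api.SchedulerConfiguration{
		PreemptionConfig: api.PreemptionConfig{
			SystemSchedulerEnabled:  false,
			BatchSchedulerEnabled:   false,
			ServiceSchedulerEnabled: false,
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scheduler configuration update submitted")
}
```

Re-enabling preemption after the upgrade is the same call with the fields set back to their desired values.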