Plan on system scheduler doesn't count nodes that don't meet constraints #1568

Merged 1 commit on Aug 12, 2016
12 changes: 9 additions & 3 deletions command/plan.go
@@ -10,6 +10,7 @@ import (

"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/jobspec"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/scheduler"
"github.com/mitchellh/colorstring"
)
@@ -172,7 +173,7 @@ func (c *PlanCommand) Run(args []string) int {

// Print the scheduler dry-run output
c.Ui.Output(c.Colorize().Color("[bold]Scheduler dry-run:[reset]"))
- c.Ui.Output(c.Colorize().Color(formatDryRun(resp)))
+ c.Ui.Output(c.Colorize().Color(formatDryRun(resp, job)))
c.Ui.Output("")

// Print the job index info
@@ -203,7 +204,7 @@ func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string {
}

// formatDryRun produces a string explaining the results of the dry run.
- func formatDryRun(resp *api.JobPlanResponse) string {
+ func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string {
var rolling *api.Evaluation
for _, eval := range resp.CreatedEvals {
if eval.TriggeredBy == "rolling-update" {
@@ -215,7 +216,12 @@ func formatDryRun(resp *api.JobPlanResponse) string {
if len(resp.FailedTGAllocs) == 0 {
out = "[bold][green]- All tasks successfully allocated.[reset]\n"
} else {
out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n"
// Change the output depending on if we are a system job or not
if job.Type == "system" {
out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n"
} else {
out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n"
}
sorted := sortedTaskGroupFromMetrics(resp.FailedTGAllocs)
for _, tg := range sorted {
metrics := resp.FailedTGAllocs[tg]
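The plan.go change threads the parsed job into formatDryRun so the dry-run warning can distinguish system jobs from other job types. Below is a minimal, self-contained sketch of that branch; warningForFailedPlacements, its plain-string jobType parameter, and the failedAllocs count are illustrative stand-ins for the real *api.JobPlanResponse and *structs.Job values, not Nomad APIs.

```go
package main

import "fmt"

// warningForFailedPlacements mirrors the branch added to formatDryRun above:
// system jobs get a node-oriented warning, every other job type keeps the
// original message. Sketch only; the real function works on the plan response
// and colorizes its output.
func warningForFailedPlacements(jobType string, failedAllocs int) string {
	if failedAllocs == 0 {
		return "- All tasks successfully allocated."
	}
	if jobType == "system" {
		return "- WARNING: Failed to place allocations on all nodes."
	}
	return "- WARNING: Failed to place all allocations."
}

func main() {
	fmt.Println(warningForFailedPlacements("system", 1))  // node-oriented warning
	fmt.Println(warningForFailedPlacements("service", 1)) // original warning
}
```

The node-oriented wording reflects the intent of this PR: for a system job, a node that fails the job's constraints should be skipped rather than surfaced as a failed placement.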
13 changes: 11 additions & 2 deletions scheduler/system_sched.go
@@ -277,13 +277,22 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
option, _ := s.stack.Select(missing.TaskGroup)

if option == nil {
- // if nodes were filtered because of constraint mismatches and we
+ // If nodes were filtered because of constraint mismatches and we
// couldn't create an allocation then decrement the queued count for that
// task group
if s.ctx.metrics.NodesFiltered > nodesFiltered {
s.queuedAllocs[missing.TaskGroup.Name] -= 1
+
+ // If we are annotating the plan, then decrement the desired
+ // placements based on whether the node meets the constraints
+ if s.eval.AnnotatePlan && s.plan.Annotations != nil &&
+ s.plan.Annotations.DesiredTGUpdates != nil {
+ desired := s.plan.Annotations.DesiredTGUpdates[missing.TaskGroup.Name]
+ desired.Place -= 1
+ }
}
- // record the current number of nodes filtered in this iteration
+
+ // Record the current number of nodes filtered in this iteration
nodesFiltered = s.ctx.metrics.NodesFiltered

// Check if this task group has already failed
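The scheduler change is pure bookkeeping: when a placement attempt fails because additional nodes were filtered by constraints, the task group's queued count and, for annotated plans, its desired placement count are decremented rather than counting the node as a failed placement. A rough sketch of that logic, using a hypothetical placementState with plain maps standing in for s.queuedAllocs and plan.Annotations.DesiredTGUpdates:

```go
package main

import "fmt"

// placementState is a hypothetical stand-in for the scheduler state touched
// in the diff above.
type placementState struct {
	queuedAllocs  map[string]int
	desiredPlace  map[string]int // stands in for plan.Annotations.DesiredTGUpdates
	annotatePlan  bool
	nodesFiltered int // running value of ctx.metrics.NodesFiltered
}

// recordConstraintFiltered applies the same bookkeeping as the diff: if the
// filtered-node count grew during this placement attempt, the failure came
// from a constraint mismatch, so the task group's queued count (and, when
// annotating, its desired placement count) is decremented.
func (s *placementState) recordConstraintFiltered(tg string, nodesFilteredNow int) {
	if nodesFilteredNow > s.nodesFiltered {
		s.queuedAllocs[tg]--
		if s.annotatePlan {
			s.desiredPlace[tg]--
		}
	}
	// Record the current number of nodes filtered for the next iteration.
	s.nodesFiltered = nodesFilteredNow
}

func main() {
	s := &placementState{
		queuedAllocs: map[string]int{"web": 10},
		desiredPlace: map[string]int{"web": 10},
		annotatePlan: true,
	}
	// One of the ten nodes fails the constraint check.
	s.recordConstraintFiltered("web", 1)
	fmt.Println(s.queuedAllocs["web"], s.desiredPlace["web"]) // 9 9
}
```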
22 changes: 17 additions & 5 deletions scheduler/system_sched_test.go
@@ -136,11 +136,23 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
// Create some nodes
for i := 0; i < 10; i++ {
node := mock.Node()
+ if i < 9 {
+ node.NodeClass = "foo"
+ } else {
+ node.NodeClass = "bar"
+ }
+ node.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))
}

- // Create a job
+ // Create a job constraining on node class
job := mock.SystemJob()
+ fooConstraint := &structs.Constraint{
+ LTarget: "${node.class}",
+ RTarget: "foo",
+ Operand: "==",
+ }
+ job.Constraints = append(job.Constraints, fooConstraint)
noErr(t, h.State.UpsertJob(h.NextIndex(), job))

// Create a mock evaluation to deregister the job
@@ -169,16 +181,16 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
for _, allocList := range plan.NodeAllocation {
planned = append(planned, allocList...)
}
- if len(planned) != 10 {
- t.Fatalf("bad: %#v", plan)
+ if len(planned) != 9 {
+ t.Fatalf("bad: %#v %d", planned, len(planned))
}

// Lookup the allocations by JobID
out, err := h.State.AllocsByJob(job.ID)
noErr(t, err)

// Ensure all allocations placed
- if len(out) != 10 {
+ if len(out) != 9 {
t.Fatalf("bad: %#v", out)
}

@@ -204,7 +216,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
t.Fatalf("expected task group web to have desired changes")
}

- expected := &structs.DesiredUpdates{Place: 10}
+ expected := &structs.DesiredUpdates{Place: 9}
if !reflect.DeepEqual(desiredChanges, expected) {
t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
}
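The updated test splits the ten mock nodes into nine of node class "foo" and one of class "bar", then constrains the system job to ${node.class} == "foo", so only nine placements and a DesiredUpdates{Place: 9} annotation are expected. A tiny hypothetical sketch of that arithmetic:

```go
package main

import "fmt"

func main() {
	// Hypothetical stand-in for the test setup above: ten node classes,
	// nine "foo" and one "bar", filtered by ${node.class} == "foo".
	classes := make([]string, 0, 10)
	for i := 0; i < 10; i++ {
		if i < 9 {
			classes = append(classes, "foo")
		} else {
			classes = append(classes, "bar")
		}
	}

	eligible := 0
	for _, class := range classes {
		if class == "foo" {
			eligible++
		}
	}
	// With this fix the plan annotations report Place: 9 rather than 10.
	fmt.Println("eligible nodes:", eligible) // eligible nodes: 9
}
```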