diff --git a/.changelog/17195.txt b/.changelog/17195.txt
new file mode 100644
index 000000000000..b6f05c06c2a4
--- /dev/null
+++ b/.changelog/17195.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+scheduler: Fixed a bug where implicit `spread` targets were treated as separate targets for scoring
+```
diff --git a/scheduler/propertyset.go b/scheduler/propertyset.go
index 27a9a0e4d6eb..9b2ee3d75866 100644
--- a/scheduler/propertyset.go
+++ b/scheduler/propertyset.go
@@ -9,6 +9,8 @@ import (
 	log "github.com/hashicorp/go-hclog"
 	memdb "github.com/hashicorp/go-memdb"
 
+	"golang.org/x/exp/slices"
+
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
@@ -33,6 +35,10 @@ type propertySet struct {
 	// targetAttribute is the attribute this property set is checking
 	targetAttribute string
 
+	// targetValues is the set of attribute values that are explicitly expected,
+	// so we can combine the counts of values that match no explicit target.
+	targetValues []string
+
 	// allowedCount is the allowed number of allocations that can have the
 	// distinct property
 	allowedCount uint64
@@ -62,6 +68,7 @@ func NewPropertySet(ctx Context, job *structs.Job) *propertySet {
 		jobID:          job.ID,
 		namespace:      job.Namespace,
 		existingValues: make(map[string]uint64),
+		targetValues:   []string{},
 		logger:         ctx.Logger().Named("property_set"),
 	}
 
@@ -130,6 +137,10 @@ func (p *propertySet) setTargetAttributeWithCount(targetAttribute string, allowe
 	p.PopulateProposed()
 }
 
+func (p *propertySet) SetTargetValues(values []string) {
+	p.targetValues = values
+}
+
 // populateExisting is a helper shared when setting the constraint to populate
 // the existing values.
 func (p *propertySet) populateExisting() {
@@ -231,7 +242,7 @@ func (p *propertySet) SatisfiesDistinctProperties(option *structs.Node, tg strin
 // UsedCount returns the number of times the value of the attribute being tracked by this
 // property set is used across current and proposed allocations. It also returns the resolved
 // attribute value for the node, and an error message if it couldn't be resolved correctly
-func (p *propertySet) UsedCount(option *structs.Node, tg string) (string, string, uint64) {
+func (p *propertySet) UsedCount(option *structs.Node, _ string) (string, string, uint64) {
 	// Check if there was an error building
 	if p.errorBuilding != nil {
 		return "", p.errorBuilding.Error(), 0
@@ -239,12 +250,13 @@ func (p *propertySet) UsedCount(option *structs.Node, tg string) (string, string
 
 	// Get the nodes property value
 	nValue, ok := getProperty(option, p.targetAttribute)
+	targetPropertyValue := p.targetedPropertyValue(nValue)
 	if !ok {
 		return nValue, fmt.Sprintf("missing property %q", p.targetAttribute), 0
 	}
 	combinedUse := p.GetCombinedUseMap()
-	usedCount := combinedUse[nValue]
-	return nValue, "", usedCount
+	usedCount := combinedUse[targetPropertyValue]
+	return targetPropertyValue, "", usedCount
 }
 
 // GetCombinedUseMap counts how many times the property has been used by
@@ -254,23 +266,25 @@ func (p *propertySet) GetCombinedUseMap() map[string]uint64 {
 	combinedUse := make(map[string]uint64, helper.Max(len(p.existingValues), len(p.proposedValues)))
 	for _, usedValues := range []map[string]uint64{p.existingValues, p.proposedValues} {
 		for propertyValue, usedCount := range usedValues {
-			combinedUse[propertyValue] += usedCount
+			targetPropertyValue := p.targetedPropertyValue(propertyValue)
+			combinedUse[targetPropertyValue] += usedCount
 		}
 	}
 
 	// Go through and discount the combined count when the value has been
 	// cleared by a proposed stop.
 	for propertyValue, clearedCount := range p.clearedValues {
-		combined, ok := combinedUse[propertyValue]
+		targetPropertyValue := p.targetedPropertyValue(propertyValue)
+		combined, ok := combinedUse[targetPropertyValue]
 		if !ok {
 			continue
 		}
 
 		// Don't clear below 0.
 		if combined >= clearedCount {
-			combinedUse[propertyValue] = combined - clearedCount
+			combinedUse[targetPropertyValue] = combined - clearedCount
 		} else {
-			combinedUse[propertyValue] = 0
+			combinedUse[targetPropertyValue] = 0
 		}
 	}
 	return combinedUse
@@ -335,7 +349,8 @@ func (p *propertySet) populateProperties(allocs []*structs.Allocation, nodes map
 			continue
 		}
 
-		properties[nProperty]++
+		targetPropertyValue := p.targetedPropertyValue(nProperty)
+		properties[targetPropertyValue]++
 	}
 }
 
@@ -347,3 +362,14 @@ func getProperty(n *structs.Node, property string) (string, bool) {
 
 	return resolveTarget(property, n)
 }
+
+// targetedPropertyValue transforms the property value to combine all implicit
+// target values into a single wildcard placeholder so that we get accurate
+// counts when we compare an explicitly-defined target against multiple implicit
+// targets.
+func (p *propertySet) targetedPropertyValue(propertyValue string) string {
+	if len(p.targetValues) == 0 || slices.Contains(p.targetValues, propertyValue) {
+		return propertyValue
+	}
+	return "*"
+}
diff --git a/scheduler/spread.go b/scheduler/spread.go
index 5dbb3d0aa5df..94712536484c 100644
--- a/scheduler/spread.go
+++ b/scheduler/spread.go
@@ -4,6 +4,7 @@
 package scheduler
 
 import (
+	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
@@ -91,6 +92,8 @@ func (iter *SpreadIterator) SetTaskGroup(tg *structs.TaskGroup) {
 		for _, spread := range iter.jobSpreads {
 			pset := NewPropertySet(iter.ctx, iter.job)
 			pset.SetTargetAttribute(spread.Attribute, tg.Name)
+			pset.SetTargetValues(helper.ConvertSlice(spread.SpreadTarget,
+				func(t *structs.SpreadTarget) string { return t.Value }))
 			iter.groupPropertySets[tg.Name] = append(iter.groupPropertySets[tg.Name], pset)
 		}
 
@@ -98,6 +101,8 @@ func (iter *SpreadIterator) SetTaskGroup(tg *structs.TaskGroup) {
 		for _, spread := range tg.Spreads {
 			pset := NewPropertySet(iter.ctx, iter.job)
 			pset.SetTargetAttribute(spread.Attribute, tg.Name)
+			pset.SetTargetValues(helper.ConvertSlice(spread.SpreadTarget,
+				func(t *structs.SpreadTarget) string { return t.Value }))
 			iter.groupPropertySets[tg.Name] = append(iter.groupPropertySets[tg.Name], pset)
 		}
 	}
diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go
index 7c36e033fc53..ffbada4200c6 100644
--- a/scheduler/spread_test.go
+++ b/scheduler/spread_test.go
@@ -11,12 +11,15 @@ import (
 	"testing"
 	"time"
 
+	"github.com/shoenig/test"
+	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
+
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/pointer"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
-	"github.com/stretchr/testify/require"
 )
 
 func TestSpreadIterator_SingleAttribute(t *testing.T) {
@@ -921,3 +924,134 @@ func TestSpreadPanicDowngrade(t *testing.T) {
 	require.NoError(t, processErr, "failed to process eval")
 	require.Len(t, h.Plans, 1)
 }
+
+func TestSpread_ImplicitTargets(t *testing.T) {
+
+	dcs := []string{"dc1", "dc2", "dc3"}
+
+	setupNodes := func(h *Harness) map[string]string {
+		nodesToDcs := map[string]string{}
+		var nodes []*RankedNode
+
+		for i, dc := range dcs {
+			for n := 0; n < 4; n++ {
+				node := mock.Node()
+				node.Datacenter = dc
+				must.NoError(t, h.State.UpsertNode(
+					structs.MsgTypeTestSetup, uint64(100+i), node))
+				nodes = append(nodes, &RankedNode{Node: node})
+				nodesToDcs[node.ID] = node.Datacenter
+			}
+		}
+		return nodesToDcs
+	}
+
+	setupJob := func(h *Harness, testCaseSpread *structs.Spread) *structs.Evaluation {
+		job := mock.MinJob()
+		job.Datacenters = dcs
+		job.TaskGroups[0].Count = 12
+
+		job.TaskGroups[0].Spreads = []*structs.Spread{testCaseSpread}
+		must.NoError(t, h.State.UpsertJob(
+			structs.MsgTypeTestSetup, h.NextIndex(), nil, job))
+
+		eval := &structs.Evaluation{
+			Namespace:   structs.DefaultNamespace,
+			ID:          uuid.Generate(),
+			Priority:    job.Priority,
+			TriggeredBy: structs.EvalTriggerJobRegister,
+			JobID:       job.ID,
+			Status:      structs.EvalStatusPending,
+		}
+		must.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup,
+			h.NextIndex(), []*structs.Evaluation{eval}))
+
+		return eval
+	}
+
+	testCases := []struct {
+		name   string
+		spread *structs.Spread
+		expect map[string]int
+	}{
+		{
+
+			name: "empty implicit target",
+			spread: &structs.Spread{
+				Weight:    100,
+				Attribute: "${node.datacenter}",
+				SpreadTarget: []*structs.SpreadTarget{
+					{
+						Value:   "dc1",
+						Percent: 50,
+					},
+				},
+			},
+			expect: map[string]int{"dc1": 6},
+		},
+		{
+			name: "wildcard implicit target",
+			spread: &structs.Spread{
+				Weight:    100,
+				Attribute: "${node.datacenter}",
+				SpreadTarget: []*structs.SpreadTarget{
+					{
+						Value:   "dc1",
+						Percent: 50,
+					},
+					{
+						Value:   "*",
+						Percent: 50,
+					},
+				},
+			},
+			expect: map[string]int{"dc1": 6},
+		},
+		{
+			name: "explicit targets",
+			spread: &structs.Spread{
+				Weight:    100,
+				Attribute: "${node.datacenter}",
+				SpreadTarget: []*structs.SpreadTarget{
+					{
+						Value:   "dc1",
+						Percent: 50,
+					},
+					{
+						Value:   "dc2",
+						Percent: 25,
+					},
+					{
+						Value:   "dc3",
+						Percent: 25,
+					},
+				},
+			},
+			expect: map[string]int{"dc1": 6, "dc2": 3, "dc3": 3},
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			h := NewHarness(t)
+			nodesToDcs := setupNodes(h)
+			eval := setupJob(h, tc.spread)
+			must.NoError(t, h.Process(NewServiceScheduler, eval))
+			must.Len(t, 1, h.Plans)
+
+			plan := h.Plans[0]
+			must.False(t, plan.IsNoOp())
+
+			dcCounts := map[string]int{}
+			for node, allocs := range plan.NodeAllocation {
+				dcCounts[nodesToDcs[node]] += len(allocs)
+			}
+			for dc, expectVal := range tc.expect {
+				// not using test.MapEqual here because we have incomplete
+				// expectations for the implicit DCs on some tests.
+				test.Eq(t, expectVal, dcCounts[dc],
+					test.Sprintf("expected %d in %q", expectVal, dc))
+			}
+
+		})
+	}
+}
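Below is a minimal, self-contained sketch of the counting behavior this patch introduces; it is illustrative only and not part of the change. The trimmed-down `propertySet`, the `main` driver, and the sample datacenter values are hypothetical, and running it requires `golang.org/x/exp` in the module's dependencies. It shows how a property value that matches no explicit spread target is folded into a single `*` bucket, so implicit targets are no longer scored separately.

```go
// Sketch only: fold implicit spread target values into one "*" bucket.
// The struct here is a hypothetical subset of scheduler.propertySet.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type propertySet struct {
	// targetValues mirrors the explicit spread target values, e.g. ["dc1"].
	targetValues []string
}

func (p *propertySet) targetedPropertyValue(propertyValue string) string {
	// No explicit targets, or an explicitly named value: count it as-is.
	if len(p.targetValues) == 0 || slices.Contains(p.targetValues, propertyValue) {
		return propertyValue
	}
	// Anything else is an implicit target and shares the wildcard bucket.
	return "*"
}

func main() {
	// A spread that only names dc1 explicitly; dc2 and dc3 are implicit.
	p := &propertySet{targetValues: []string{"dc1"}}

	counts := map[string]uint64{}
	for _, dc := range []string{"dc1", "dc2", "dc3", "dc2"} {
		counts[p.targetedPropertyValue(dc)]++
	}
	// dc2 and dc3 no longer count as separate targets: map[*:3 dc1:1]
	fmt.Println(counts)
}
```

In the patch itself the same transformation is applied in `UsedCount`, `GetCombinedUseMap`, and `populateProperties`, so existing, proposed, and cleared allocation counts all land in the same bucket before spread scoring.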