Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update enqueue action, import overcommit plugin to limit pending jobs from inqueue. #1298

Merged
merged 3 commits into from
Feb 9, 2021
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ tiers:
- name: proportion
- name: nodeorder
- name: binpack
- name: overcommit
72 changes: 1 addition & 71 deletions pkg/scheduler/actions/enqueue/enqueue.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,19 +28,6 @@ import (
"volcano.sh/volcano/pkg/scheduler/util"
)

const (
// overCommitFactor is resource overCommit factor for enqueue action
// It determines the number of `pending` pods that the scheduler will tolerate
// when the resources of the cluster is insufficient
overCommitFactor = "overcommit-factor"
)

var (
// defaultOverCommitFactor defines the default overCommit resource factor for enqueue action
defaultOverCommitFactor = 1.2
targetJob = util.Reservation.TargetJob
)

type Action struct{}

func New() *Action {
Expand Down Expand Up @@ -90,31 +77,11 @@ func (enqueue *Action) Execute(ssn *framework.Session) {

klog.V(3).Infof("Try to enqueue PodGroup to %d Queues", len(jobsMap))

total := api.EmptyResource()
used := api.EmptyResource()
lockedNodesIdle := api.EmptyResource()
if targetJob != nil && len(util.Reservation.LockedNodes) != 0 {
for _, node := range util.Reservation.LockedNodes {
lockedNodesIdle.Add(node.Idle)
klog.V(4).Infof("locked node: %s", node.Name)
}
}
for _, node := range ssn.Nodes {
total.Add(node.Allocatable)
used.Add(node.Used)
}
idle := total.Clone().Multi(enqueue.getOverCommitFactor(ssn)).Sub(used).Sub(lockedNodesIdle)

for {
if queues.Empty() {
break
}

if idle.IsEmpty() {
klog.V(3).Infof("Node idle resource is overused, ignore it.")
break
}

queue := queues.Pop().(*api.QueueInfo)

// Found "high" priority job
Expand All @@ -123,52 +90,15 @@ func (enqueue *Action) Execute(ssn *framework.Session) {
continue
}
job := jobs.Pop().(*api.JobInfo)
if targetJob != nil && job.UID == targetJob.UID {
klog.V(3).Infof("Target Job name: %s", targetJob.Name)
continue
}

inqueue := false

if job.PodGroup.Spec.MinResources == nil {
inqueue = true
} else {
minReq := api.NewResource(*job.PodGroup.Spec.MinResources)
if ssn.JobEnqueueable(job) && minReq.LessEqual(idle) {
idle.Sub(minReq)
inqueue = true
}
}

if inqueue {
if job.PodGroup.Spec.MinResources == nil || ssn.JobEnqueueable(job) {
job.PodGroup.Status.Phase = scheduling.PodGroupInqueue
ssn.Jobs[job.UID] = job
}

// Added Queue back until no job in Queue.
queues.Push(queue)
}
// if target job exists, judge whether it can be inqueue or not
if targetJob != nil && targetJob.PodGroup.Status.Phase == scheduling.PodGroupPending && len(util.Reservation.LockedNodes) != 0 {
klog.V(4).Infof("Start to deal with Target Job")
minReq := api.NewResource(*targetJob.PodGroup.Spec.MinResources)
idle = idle.Add(lockedNodesIdle)
if ssn.JobEnqueueable(targetJob) && minReq.LessEqual(idle) {
klog.V(3).Infof("Turn Target Job phase to Inqueue")
targetJob.PodGroup.Status.Phase = scheduling.PodGroupInqueue
ssn.Jobs[targetJob.UID] = targetJob
}
}
}

func (enqueue *Action) UnInitialize() {}

func (enqueue *Action) getOverCommitFactor(ssn *framework.Session) float64 {
factor := defaultOverCommitFactor
arg := framework.GetArgOfActionFromConf(ssn.Configurations, enqueue.Name())
if arg != nil {
arg.GetFloat64(&factor, overCommitFactor)
}

return factor
}
53 changes: 0 additions & 53 deletions pkg/scheduler/actions/enqueue/enqueue_test.go

This file was deleted.

2 changes: 2 additions & 0 deletions pkg/scheduler/conf/scheduler_conf.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ type PluginOption struct {
EnabledTargetJob *bool `yaml:"enableTargetJob"`
// EnabledReservedNodes defines whether reservedNodesFn is enabled
EnabledReservedNodes *bool `yaml:"enableReservedNodes"`
// EnabledJobEnqueued defines whether jobEnqueuedFn is enabled
EnabledJobEnqueued *bool `yaml:"enableJobEnqueued"`
// EnabledVictim defines whether victimsFn is enabled
EnabledVictim *bool `yaml:"enabledVictim"`
// EnabledJobStarving defines whether jobStarvingFn is enabled
Expand Down
15 changes: 11 additions & 4 deletions pkg/scheduler/framework/session_plugins.go
Original file line number Diff line number Diff line change
Expand Up @@ -269,7 +269,7 @@ func (ssn *Session) JobPipelined(obj interface{}) bool {
return false
}
}
// this tier registed function
// this tier registered function
if hasFound {
return true
}
Expand Down Expand Up @@ -297,7 +297,7 @@ func (ssn *Session) JobStarving(obj interface{}) bool {
return false
}
}
// this tier registed function
// this tier registered function
if hasFound {
return true
}
Expand Down Expand Up @@ -328,18 +328,25 @@ func (ssn *Session) JobValid(obj interface{}) *api.ValidateResult {
// JobEnqueueable invoke jobEnqueueableFns function of the plugins
func (ssn *Session) JobEnqueueable(obj interface{}) bool {
for _, tier := range ssn.Tiers {
var hasFound bool
for _, plugin := range tier.Plugins {
if !isEnabled(plugin.EnabledJobEnqueued) {
continue
}
fn, found := ssn.jobEnqueueableFns[plugin.Name]
if !found {
continue
}

hasFound = true
if res := fn(obj); !res {
return res
}
}
// this tier registered function
if hasFound {
return true
}
}

jiangkaihua marked this conversation as resolved.
Show resolved Hide resolved
return true
}

Expand Down
3 changes: 3 additions & 0 deletions pkg/scheduler/plugins/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,9 @@ func ApplyPluginConfDefaults(option *conf.PluginOption) {
if option.EnabledJobPipelined == nil {
option.EnabledJobPipelined = &t
}
if option.EnabledJobEnqueued == nil {
option.EnabledJobEnqueued = &t
}
if option.EnabledTaskOrder == nil {
option.EnabledTaskOrder = &t
}
Expand Down
2 changes: 2 additions & 0 deletions pkg/scheduler/plugins/factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"volcano.sh/volcano/pkg/scheduler/plugins/drf"
"volcano.sh/volcano/pkg/scheduler/plugins/gang"
"volcano.sh/volcano/pkg/scheduler/plugins/nodeorder"
"volcano.sh/volcano/pkg/scheduler/plugins/overcommit"
"volcano.sh/volcano/pkg/scheduler/plugins/predicates"
"volcano.sh/volcano/pkg/scheduler/plugins/priority"
"volcano.sh/volcano/pkg/scheduler/plugins/proportion"
Expand All @@ -41,6 +42,7 @@ func init() {
framework.RegisterPluginBuilder(binpack.PluginName, binpack.New)
framework.RegisterPluginBuilder(reservation.PluginName, reservation.New)
framework.RegisterPluginBuilder(tdm.PluginName, tdm.New)
framework.RegisterPluginBuilder(overcommit.PluginName, overcommit.New)

// Plugins for Queues
framework.RegisterPluginBuilder(proportion.PluginName, proportion.New)
Expand Down
117 changes: 117 additions & 0 deletions pkg/scheduler/plugins/overcommit/overcommit.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
/*
Copyright 2021 The Volcano Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package overcommit

import (
"time"

"k8s.io/klog"

"volcano.sh/volcano/pkg/apis/scheduling"
"volcano.sh/volcano/pkg/scheduler/api"
"volcano.sh/volcano/pkg/scheduler/framework"
)

const (
	// PluginName is the name of the overcommit plugin.
	PluginName = "overcommit"
	// overCommitFactor is the configuration key of the resource overCommit factor
	// used by the enqueue action. It determines how many `pending` pods the
	// scheduler will tolerate when the resources of the cluster are insufficient.
	overCommitFactor = "overcommit-factor"
	// defaultOverCommitFactor defines the default overCommit resource factor for the enqueue action.
	defaultOverCommitFactor = 1.2
)

// overcommitPlugin gates job enqueueing on cluster capacity scaled by an
// overcommit factor, tolerating some pending jobs beyond real resources.
type overcommitPlugin struct {
	// Arguments given for the plugin
	pluginArguments framework.Arguments
	// idleResource is the cluster allocatable capacity multiplied by the
	// overcommit factor, minus resources in use (computed in OnSessionOpen).
	idleResource *api.Resource
	// inqueueResource accumulates the MinResources of jobs already inqueue.
	inqueueResource *api.Resource
	// overCommitFactor scales cluster capacity; values below 1 fall back to
	// defaultOverCommitFactor (see OnSessionOpen).
	overCommitFactor float64
}

// New returns an overcommit plugin object with all resource accumulators
// initialized and the overcommit factor set to its default value.
func New(arguments framework.Arguments) framework.Plugin {
	return &overcommitPlugin{
		pluginArguments: arguments,
		// Initialize both resource fields so the plugin's zero state is safe
		// even before OnSessionOpen populates them (previously inqueueResource
		// was left nil, unlike its sibling idleResource).
		idleResource:     api.EmptyResource(),
		inqueueResource:  api.EmptyResource(),
		overCommitFactor: defaultOverCommitFactor,
	}
}

// Name returns the name of the overcommit plugin.
func (op *overcommitPlugin) Name() string {
	return PluginName
}

func (op *overcommitPlugin) OnSessionOpen(ssn *framework.Session) {
overcommitStartTime := time.Now().UnixNano()
op.pluginArguments.GetFloat64(&op.overCommitFactor, overCommitFactor)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

add overCommitFactor illegal check

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added overcommit-factor validity check: if input < 1, print warning logs & use default value instead.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

what happen if overcommit-factor is an invalid number?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

what happen if overcommit-factor is an invalid number?

GetFloat64() do not return err, only print warning logs, so here initializes overcommit-factor with an illegal value -1. If its value is not updated after GetFloat64(), it will be processed the sam as condition 'input < 1'.

func (a Arguments) GetFloat64(ptr *float64, key string) {

if op.overCommitFactor < 1.0 {
klog.Warningf("invalid input %f for overcommit-factor, reason: overcommit-factor cannot be less than 1,"+
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

s/invalid/Invalid

" using default value: %f", op.overCommitFactor, defaultOverCommitFactor)
op.overCommitFactor = defaultOverCommitFactor
}
klog.V(4).Infof("overcommit plugin starts, overCommitFactor: %f", op.overCommitFactor)
defer klog.V(4).Infof("overcommit plugin finishes, execution time: %dns",
time.Now().UnixNano()-overcommitStartTime)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please remove time calculation in product, it only used for debug or analysis.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

time calculation is already removed.


// calculate idle resources of total cluster, overcommit resources included
total := api.EmptyResource()
used := api.EmptyResource()
for _, node := range ssn.Nodes {
total.Add(node.Allocatable)
used.Add(node.Used)
}
op.idleResource = total.Clone().Multi(op.overCommitFactor).Sub(used)

// calculate inqueue job resources
inqueue := api.EmptyResource()
for _, job := range ssn.Jobs {
if job.PodGroup.Status.Phase == scheduling.PodGroupInqueue {
inqueue.Add(api.NewResource(*job.PodGroup.Spec.MinResources))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

MinResources maybe nil

}
}
op.inqueueResource = inqueue.Clone()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why clone it?


ssn.AddJobEnqueueableFn(op.Name(), func(obj interface{}) bool {
job := obj.(*api.JobInfo)
idle := op.idleResource
inqueue := api.EmptyResource()
inqueue.Add(op.inqueueResource)
if job.PodGroup.Spec.MinResources == nil {
klog.V(4).Infof("job <%s/%s> is bestEffort, allow it be inqueue", job.Namespace, job.Name)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

s/job/Job

return true
}

//TODO: if allow 1 more job to be inqueue beyond overcommit-factor, large job may be inqueue and create pods
jobMinReq := api.NewResource(*job.PodGroup.Spec.MinResources)
if inqueue.Add(jobMinReq).LessEqual(idle) {
klog.V(4).Infof("sufficient resources, allow job <%s/%s> be inqueue", job.Namespace, job.Name)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ditto

op.inqueueResource.Add(jobMinReq)
return true
}
klog.V(4).Infof("idle resource in cluster is overused, ignore job <%s/%s>",
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

change log from "idle resource in cluster is overused" to "resource in cluster is overused, not allow job <%s/%s> be inqueue"

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

modified this log.

job.Namespace, job.Name)
return false
})
}

// OnSessionClose drops the per-session resource snapshots so they can be
// garbage collected; OnSessionOpen rebuilds them for the next session.
func (op *overcommitPlugin) OnSessionClose(ssn *framework.Session) {
	op.idleResource = nil
	op.inqueueResource = nil
}
Loading