Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

scheduler: improve plugin args #1857

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 8 additions & 7 deletions pkg/scheduler/apis/config/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ type ReservationArgs struct {
metav1.TypeMeta

// EnablePreemption indicates whether to enable preemption for reservations.
EnablePreemption *bool
EnablePreemption bool
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
Expand All @@ -167,10 +167,10 @@ type ElasticQuotaArgs struct {
metav1.TypeMeta

// DelayEvictTime is the duration to handle the jitter of used and runtime
DelayEvictTime *metav1.Duration
DelayEvictTime metav1.Duration

// RevokePodInterval is the interval to check quotaGroup's used and runtime
RevokePodInterval *metav1.Duration
RevokePodInterval metav1.Duration

// DefaultQuotaGroupMax limit the maxQuota of DefaultQuotaGroup
DefaultQuotaGroupMax corev1.ResourceList
Expand All @@ -182,10 +182,10 @@ type ElasticQuotaArgs struct {
QuotaGroupNamespace string

// MonitorAllQuotas monitor the quotaGroups' used and runtime Quota to revoke pods
MonitorAllQuotas *bool
MonitorAllQuotas bool

// EnableCheckParentQuota check parentQuotaGroups' used and runtime Quota in PreFilter
EnableCheckParentQuota *bool
EnableCheckParentQuota bool

// EnableRuntimeQuota if false, use max instead of runtime for all checks.
EnableRuntimeQuota bool
Expand All @@ -199,10 +199,10 @@ type CoschedulingArgs struct {

// DefaultTimeout is the default gang's waiting time in Permit stage
// default is 600 seconds
DefaultTimeout *metav1.Duration
DefaultTimeout metav1.Duration
// Workers number of controller
// default is 1
ControllerWorkers *int64
ControllerWorkers int64
// Skip check schedule cycle
// default is false
SkipCheckScheduleCycle bool
Expand All @@ -215,6 +215,7 @@ type DeviceShareArgs struct {
metav1.TypeMeta

// Allocator indicates the expected allocator to use
// Deprecated: Adapting to different allocators is no longer supported.
Allocator string
// ScoringStrategy selects the device resource scoring strategy.
ScoringStrategy *ScoringStrategy
Expand Down
3 changes: 2 additions & 1 deletion pkg/scheduler/apis/config/v1beta2/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ type ElasticQuotaArgs struct {
EnableCheckParentQuota *bool `json:"enableCheckParentQuota,omitempty"`

// EnableRuntimeQuota if false, use max instead of runtime for all checks.
EnableRuntimeQuota *bool
EnableRuntimeQuota *bool `json:"enableRuntimeQuota,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
Expand All @@ -210,6 +210,7 @@ type DeviceShareArgs struct {
metav1.TypeMeta

// Allocator indicates the expected allocator to use
// Deprecated: Adapting to different allocators is no longer supported.
Allocator string `json:"allocator,omitempty"`
// ScoringStrategy selects the device resource scoring strategy.
ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"`
Expand Down
56 changes: 42 additions & 14 deletions pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 4 additions & 4 deletions pkg/scheduler/apis/config/validation/validation_pluginargs.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,22 +109,22 @@ func ValidateElasticQuotaArgs(elasticArgs *config.ElasticQuotaArgs) error {
}
}

if elasticArgs.DelayEvictTime != nil && elasticArgs.DelayEvictTime.Duration < 0 {
if elasticArgs.DelayEvictTime.Duration < 0 {
return fmt.Errorf("elasticQuotaArgs error, DelayEvictTime should be a positive value")
}

if elasticArgs.RevokePodInterval != nil && elasticArgs.RevokePodInterval.Duration < 0 {
if elasticArgs.RevokePodInterval.Duration < 0 {
return fmt.Errorf("elasticQuotaArgs error, RevokePodCycle should be a positive value")
}

return nil
}

func ValidateCoschedulingArgs(coeSchedulingArgs *config.CoschedulingArgs) error {
if coeSchedulingArgs.DefaultTimeout != nil && coeSchedulingArgs.DefaultTimeout.Duration < 0 {
if coeSchedulingArgs.DefaultTimeout.Duration < 0 {
return fmt.Errorf("coeSchedulingArgs DefaultTimeoutSeconds invalid")
}
if coeSchedulingArgs.ControllerWorkers != nil && *coeSchedulingArgs.ControllerWorkers < 1 {
if coeSchedulingArgs.ControllerWorkers < 1 {
return fmt.Errorf("coeSchedulingArgs ControllerWorkers invalid")
}
return nil
Expand Down
55 changes: 11 additions & 44 deletions pkg/scheduler/apis/config/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ func setUp(ctx context.Context, podNames []string, pgName string, podPhase v1.Po
koordClient := koordfake.NewSimpleClientset()
koordInformerFactory := koordinformers.NewSharedInformerFactory(koordClient, 0)

args := &config.CoschedulingArgs{DefaultTimeout: &metav1.Duration{Duration: time.Second}}
args := &config.CoschedulingArgs{DefaultTimeout: metav1.Duration{Duration: time.Second}}
pgMgr := core.NewPodGroupManager(args, pgClient, pgInformerFactory, informerFactory, koordInformerFactory)
ctrl := NewPodGroupController(pgInformer, podInformer, pgClient, pgMgr, 1)
return ctrl, kubeClient, pgClient
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/coscheduling/core/core_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func NewManagerForTest() *Mgr {
koordClient := koordfake.NewSimpleClientset()
koordInformerFactory := koordinformers.NewSharedInformerFactory(koordClient, 0)

args := &config.CoschedulingArgs{DefaultTimeout: &metav1.Duration{Duration: 300 * time.Second}}
args := &config.CoschedulingArgs{DefaultTimeout: metav1.Duration{Duration: 300 * time.Second}}

pgManager := NewPodGroupManager(args, pgClient, pgInformerFactory, informerFactory, koordInformerFactory)
return &Mgr{
Expand Down
7 changes: 1 addition & 6 deletions pkg/scheduler/plugins/coscheduling/core/gang.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,12 +154,7 @@ func (gang *Gang) tryInitByPodConfig(pod *v1.Pod, args *config.CoschedulingArgs)
if err != nil || waitTime <= 0 {
klog.Errorf("pod's annotation GangWaitTimeAnnotation illegal, gangName: %v, value: %v",
gang.Name, pod.Annotations[extension.AnnotationGangWaitTime])
if args.DefaultTimeout != nil {
waitTime = args.DefaultTimeout.Duration
} else {
klog.Errorf("gangArgs DefaultTimeoutSeconds is nil")
waitTime = 0
}
waitTime = args.DefaultTimeout.Duration
}
gang.WaitTime = waitTime

Expand Down
6 changes: 3 additions & 3 deletions pkg/scheduler/plugins/coscheduling/core/gang_cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -808,7 +808,7 @@ func TestGangCache_OnPodDelete(t *testing.T) {
pgInformerFactory := pgformers.NewSharedInformerFactory(pgClient, 0)
pgInformer := pgInformerFactory.Scheduling().V1alpha1().PodGroups()
pglister := pgInformer.Lister()
gangCache := NewGangCache(&config.CoschedulingArgs{DefaultTimeout: &metav1.Duration{Duration: time.Second}}, nil, pglister, pgClient)
gangCache := NewGangCache(&config.CoschedulingArgs{DefaultTimeout: metav1.Duration{Duration: time.Second}}, nil, pglister, pgClient)
for _, pg := range tt.podGroups {
err := retry.OnError(
retry.DefaultRetry,
Expand Down Expand Up @@ -963,7 +963,7 @@ func TestGangCache_OnPodGroupAdd(t *testing.T) {
}()
timeNowFn = fakeTimeNowFn
pgClient := fakepgclientset.NewSimpleClientset()
gangCache := NewGangCache(&config.CoschedulingArgs{DefaultTimeout: &metav1.Duration{Duration: time.Second}}, nil, nil, pgClient)
gangCache := NewGangCache(&config.CoschedulingArgs{DefaultTimeout: metav1.Duration{Duration: time.Second}}, nil, nil, pgClient)
for _, pg := range tt.pgs {
gangCache.onPodGroupAdd(pg)
}
Expand Down Expand Up @@ -1093,7 +1093,7 @@ func TestGangCache_onPodGroupUpdate(t *testing.T) {
pgInformerFactory := pgformers.NewSharedInformerFactory(pgClient, 0)
pgInformer := pgInformerFactory.Scheduling().V1alpha1().PodGroups()
pglister := pgInformer.Lister()
cache := NewGangCache(&config.CoschedulingArgs{DefaultTimeout: &metav1.Duration{Duration: time.Second}}, nil, pglister, pgClient)
cache := NewGangCache(&config.CoschedulingArgs{DefaultTimeout: metav1.Duration{Duration: time.Second}}, nil, pglister, pgClient)

// init gang
podGroup := &v1alpha1.PodGroup{
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/coscheduling/plugin_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ func (cs *Coscheduling) NewControllers() ([]frameworkext.Controller, error) {
if cs.args == nil {
controllerWorkers = 1
} else {
controllerWorkers = int(*cs.args.ControllerWorkers)
controllerWorkers = int(cs.args.ControllerWorkers)
}
podGroupController := controller.NewPodGroupController(cs.pgInformer, podInformer, cs.pgClient, pgMgr, controllerWorkers)
return []frameworkext.Controller{podGroupController}, nil
Expand Down
Loading