Update apis for release v1.1.0
Signed-off-by: Siyu Wang <FillZpp.pub@gmail.com>
FillZpp committed Dec 26, 2022
1 parent 96f5a6a commit eee9df9
Showing 12 changed files with 188 additions and 51 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.22

-KOORD_VERSION ?= v0.7.0
+KOORD_VERSION ?= v1.1.0

# Set license header files.
LICENSE_HEADER_GO ?= hack/boilerplate/boilerplate.go.txt
14 changes: 7 additions & 7 deletions client/clientset/versioned/fake/register.go

Some generated files are not rendered by default.

14 changes: 7 additions & 7 deletions client/clientset/versioned/scheme/register.go

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion extension/elastic_quota.go
@@ -25,7 +25,7 @@ import (
"sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
)

-//RootQuotaName means quotaTree's root\head.
+// RootQuotaName means quotaTree's root\head.
const (
	SystemQuotaName = "system"
	RootQuotaName = "root"
17 changes: 17 additions & 0 deletions extension/node.go
@@ -38,9 +38,13 @@ const (
)

const (
	// NodeCPUBindPolicyNone does not perform any bind policy
	NodeCPUBindPolicyNone = "None"
	// NodeCPUBindPolicyFullPCPUsOnly requires that the scheduler must allocate full physical cores.
	// Equivalent to kubelet CPU manager policy option full-pcpus-only=true.
	NodeCPUBindPolicyFullPCPUsOnly = "FullPCPUsOnly"
	// NodeCPUBindPolicySpreadByPCPUs requires that the scheduler must evenly allocate logical cpus across physical cores
	NodeCPUBindPolicySpreadByPCPUs = "SpreadByPCPUs"
)

const (
@@ -136,3 +140,16 @@ func GetKubeletCPUManagerPolicy(annotations map[string]string) (*KubeletCPUManagerPolicy, error)
	}
	return cpuManagerPolicy, nil
}

func GetNodeCPUBindPolicy(nodeLabels map[string]string, kubeletCPUPolicy *KubeletCPUManagerPolicy) string {
	nodeCPUBindPolicy := nodeLabels[LabelNodeCPUBindPolicy]
	if nodeCPUBindPolicy == NodeCPUBindPolicyFullPCPUsOnly ||
		(kubeletCPUPolicy != nil && kubeletCPUPolicy.Policy == KubeletCPUManagerPolicyStatic &&
			kubeletCPUPolicy.Options[KubeletCPUManagerPolicyFullPCPUsOnlyOption] == "true") {
		return NodeCPUBindPolicyFullPCPUsOnly
	}
	if nodeCPUBindPolicy == NodeCPUBindPolicySpreadByPCPUs {
		return nodeCPUBindPolicy
	}
	return NodeCPUBindPolicyNone
}
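
A minimal usage sketch for the new helper, assuming the extension package is imported from the module path shown in this commit (github.com/koordinator-sh/apis) and that LabelNodeCPUBindPolicy, GetKubeletCPUManagerPolicy, and the constants above behave as defined in this file; the node object is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/koordinator-sh/apis/extension"
)

func main() {
	// Illustrative node: the label below is the explicit per-node override
	// that GetNodeCPUBindPolicy reads before falling back to the kubelet
	// CPU manager policy reported on the node annotations.
	node := &corev1.Node{}
	node.Labels = map[string]string{
		extension.LabelNodeCPUBindPolicy: extension.NodeCPUBindPolicySpreadByPCPUs,
	}

	kubeletPolicy, err := extension.GetKubeletCPUManagerPolicy(node.Annotations)
	if err != nil {
		panic(err)
	}
	// With no kubelet static/full-pcpus-only policy present, the explicit
	// label wins and the result is "SpreadByPCPUs".
	fmt.Println(extension.GetNodeCPUBindPolicy(node.Labels, kubeletPolicy))
}
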
46 changes: 46 additions & 0 deletions extension/resource.go
@@ -52,6 +52,11 @@ const (
	// AnnotationResourceStatus represents resource allocation result.
	// koord-scheduler patch Pod with the annotation before binding to node.
	AnnotationResourceStatus = SchedulingDomainPrefix + "/resource-status"

	// AnnotationExtendedResourceSpec specifies the resource requirements of extended resources for internal usage.
	// It annotates the requests/limits of extended resources and can be used by runtime proxy and koordlet that
	// cannot get the original pod spec in CRI requests.
	AnnotationExtendedResourceSpec = NodeDomainPrefix + "/extended-resource-spec"
)

var (
@@ -177,3 +182,44 @@ func TranslateResourceNameByPriorityClass(priorityClass PriorityClass, defaultRe
	}
	return ResourceNameMap[priorityClass][defaultResourceName]
}

type ExtendedResourceSpec struct {
	Containers map[string]ExtendedResourceContainerSpec `json:"containers,omitempty"`
}

type ExtendedResourceContainerSpec struct {
	Limits corev1.ResourceList `json:"limits,omitempty"`
	Requests corev1.ResourceList `json:"requests,omitempty"`
}

// GetExtendedResourceSpec parses ExtendedResourceSpec from annotations
func GetExtendedResourceSpec(annotations map[string]string) (*ExtendedResourceSpec, error) {
	spec := &ExtendedResourceSpec{}
	if annotations == nil {
		return spec, nil
	}
	data, ok := annotations[AnnotationExtendedResourceSpec]
	if !ok {
		return spec, nil
	}
	err := json.Unmarshal([]byte(data), spec)
	if err != nil {
		return nil, err
	}
	return spec, nil
}

func SetExtendedResourceSpec(pod *corev1.Pod, spec *ExtendedResourceSpec) error {
	if pod == nil {
		return nil
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	data, err := json.Marshal(spec)
	if err != nil {
		return err
	}
	pod.Annotations[AnnotationExtendedResourceSpec] = string(data)
	return nil
}
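
A short round-trip sketch of the new annotation helpers, under the same module-path assumption as above; the container name "main" and the resource name "example.com/foo" are placeholders, not names defined by this API:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/koordinator-sh/apis/extension"
)

func main() {
	pod := &corev1.Pod{}
	spec := &extension.ExtendedResourceSpec{
		Containers: map[string]extension.ExtendedResourceContainerSpec{
			// "main" and "example.com/foo" are illustrative names only.
			"main": {
				Requests: corev1.ResourceList{"example.com/foo": resource.MustParse("2")},
				Limits:   corev1.ResourceList{"example.com/foo": resource.MustParse("2")},
			},
		},
	}

	// SetExtendedResourceSpec serializes the spec into the
	// AnnotationExtendedResourceSpec annotation on the pod.
	if err := extension.SetExtendedResourceSpec(pod, spec); err != nil {
		panic(err)
	}

	// Components that only see annotations (e.g. runtime proxy, koordlet)
	// can then recover the requests/limits without the original pod spec.
	got, err := extension.GetExtendedResourceSpec(pod.Annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Containers["main"].Requests)
}
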
16 changes: 16 additions & 0 deletions extension/scheduling.go
@@ -21,9 +21,11 @@ import (
"strconv"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

schedulingv1alpha1 "github.com/koordinator-sh/apis/scheduling/v1alpha1"
slov1alpha1 "github.com/koordinator-sh/apis/slo/v1alpha1"
)

const (
@@ -78,7 +80,21 @@

// CustomUsageThresholds supports user-defined node resource utilization thresholds.
type CustomUsageThresholds struct {
	// UsageThresholds indicates the resource utilization threshold of the whole machine.
	UsageThresholds map[corev1.ResourceName]int64 `json:"usageThresholds,omitempty"`
	// ProdUsageThresholds indicates the resource utilization threshold of Prod Pods compared to the whole machine
	ProdUsageThresholds map[corev1.ResourceName]int64 `json:"prodUsageThresholds,omitempty"`
	// AggregatedUsage supports resource utilization filtering and scoring based on percentile statistics
	AggregatedUsage *CustomAggregatedUsage `json:"aggregatedUsage,omitempty"`
}

type CustomAggregatedUsage struct {
	// UsageThresholds indicates the resource utilization threshold of the machine based on percentile statistics
	UsageThresholds map[corev1.ResourceName]int64 `json:"usageThresholds,omitempty"`
	// UsageAggregationType indicates the percentile type of the machine's utilization when filtering
	UsageAggregationType slov1alpha1.AggregationType `json:"usageAggregationType,omitempty"`
	// UsageAggregatedDuration indicates the statistical period of the percentile of the machine's utilization when filtering
	UsageAggregatedDuration *metav1.Duration `json:"usageAggregatedDuration,omitempty"`
}

func GetCustomUsageThresholds(node *corev1.Node) (*CustomUsageThresholds, error) {
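A sketch of how the extended CustomUsageThresholds payload could be built and serialized; the percentile value "p95" is illustrative (valid values are defined by slov1alpha1.AggregationType), the thresholds are arbitrary, and the module path assumption is the same as above:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/apis/extension"
	slov1alpha1 "github.com/koordinator-sh/apis/slo/v1alpha1"
)

func main() {
	thresholds := &extension.CustomUsageThresholds{
		// Filter out nodes whose instantaneous CPU utilization exceeds 65%.
		UsageThresholds: map[corev1.ResourceName]int64{corev1.ResourceCPU: 65},
		// Additionally gate on a percentile of utilization over a window.
		AggregatedUsage: &extension.CustomAggregatedUsage{
			UsageThresholds:         map[corev1.ResourceName]int64{corev1.ResourceCPU: 70},
			UsageAggregationType:    slov1alpha1.AggregationType("p95"), // illustrative value
			UsageAggregatedDuration: &metav1.Duration{Duration: 10 * time.Minute},
		},
	}

	// This JSON is the shape GetCustomUsageThresholds expects to find on the
	// node; the exact annotation key is defined elsewhere in this package.
	data, err := json.Marshal(thresholds)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
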
2 changes: 1 addition & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.17

require (
	github.com/golang/protobuf v1.5.2
-	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
+	github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb
	google.golang.org/grpc v1.38.0
	google.golang.org/protobuf v1.28.0
	k8s.io/api v0.22.6
