UPSTREAM: 98073: (e2e/scheduler) Ensure minimum memory limit in createBalancedPodForNodes
damemi committed Feb 19, 2021
1 parent 68104e8 commit 110e007
Showing 1 changed file with 31 additions and 4 deletions.
test/e2e/scheduling/priorities.go: 31 additions & 4 deletions
@@ -55,6 +55,11 @@ type Resource struct {
 
 var balancePodLabel = map[string]string{"podname": "priority-balanced-memory"}
 
+// track min memory limit based on crio minimum. pods cannot set a limit lower than this
+// see: https://github.com/cri-o/cri-o/blob/29805b13e9a43d9d22628553db337ce1c1bec0a8/internal/config/cgmgr/cgmgr.go#L23
+// see: https://bugzilla.redhat.com/show_bug.cgi?id=1595256
+var crioMinMemLimit = 12 * 1024 * 1024
+
 var podRequestedResource = &v1.ResourceRequirements{
 	Limits: v1.ResourceList{
 		v1.ResourceMemory: resource.MustParse("100Mi"),
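
The new crioMinMemLimit constant works out to 12 * 1024 * 1024 = 12,582,912 bytes, i.e. 12Mi in Kubernetes resource notation. A minimal sketch (not part of the commit) of that floor expressed as a resource.Quantity, assuming only the standard k8s.io/apimachinery/pkg/api/resource package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// crioMinMemLimit from the hunk above: the smallest memory limit cri-o will accept.
	crioMinMemLimit := int64(12 * 1024 * 1024)

	// As a Kubernetes quantity this is exactly "12Mi".
	q := resource.NewQuantity(crioMinMemLimit, resource.BinarySI)
	fmt.Println(q.String()) // 12Mi
}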
@@ -121,6 +126,19 @@ func removeAvoidPodsOffNode(c clientset.Interface, nodeName string) {
 	framework.ExpectNoError(err)
 }
 
+// nodesAreTooUtilized ensures that each node can support 2*crioMinMemLimit
+// We check for double because it needs to support at least the cri-o minimum
+// plus whatever delta between node usages (which could be up to or at least crioMinMemLimit)
+func nodesAreTooUtilized(cs clientset.Interface, nodeList *v1.NodeList) bool {
+	for _, node := range nodeList.Items {
+		_, memFraction, _, memAllocatable := computeCPUMemFraction(cs, node, podRequestedResource)
+		if float64(memAllocatable)-(memFraction*float64(memAllocatable)) < float64(2*crioMinMemLimit) {
+			return true
+		}
+	}
+	return false
+}
+
 // This test suite is used to verify scheduler priority functions based on the default provider
 var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 	var cs clientset.Interface
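
The nodesAreTooUtilized check added above requires every node to have at least 2*crioMinMemLimit (roughly 24 MiB) of unrequested allocatable memory: one crioMinMemLimit for the balancing pod's own floor, plus about one more for the spread between node utilizations. A self-contained sketch of the same arithmetic, outside the diff, with made-up numbers (the 8 GiB node and the fractions are illustrative, not taken from a real cluster):

package main

import "fmt"

// crioMinMemLimit mirrors the constant introduced in the diff: 12 MiB.
const crioMinMemLimit = 12 * 1024 * 1024

// tooUtilized reports whether a node lacks 2*crioMinMemLimit of free allocatable memory,
// using the same expression as nodesAreTooUtilized above.
func tooUtilized(memAllocatable int64, memFraction float64) bool {
	free := float64(memAllocatable) - memFraction*float64(memAllocatable)
	return free < float64(2*crioMinMemLimit)
}

func main() {
	// Hypothetical 8 GiB node with 99.9% of allocatable memory already requested:
	// free ≈ 8.2 MiB, below the ~24 MiB threshold, so the suite would skip.
	fmt.Println(tooUtilized(8*1024*1024*1024, 0.999)) // true

	// The same node at 90% requested has ~819 MiB free, well above the threshold.
	fmt.Println(tooUtilized(8*1024*1024*1024, 0.90)) // false
}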
@@ -149,6 +167,12 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		framework.ExpectNoError(err)
 		err = e2epod.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
 		framework.ExpectNoError(err)
+
+		// skip if the most utilized node has less than the cri-o minMemLimit available
+		// otherwise we will not be able to run the test pod once all nodes are balanced
+		if nodesAreTooUtilized(cs, nodeList) {
+			ginkgo.Skip("nodes are too utilized to schedule test pods")
+		}
 	})
 
 	ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
@@ -483,8 +507,9 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	var maxCPUFraction, maxMemFraction float64 = ratio, ratio
 	var cpuFractionMap = make(map[string]float64)
 	var memFractionMap = make(map[string]float64)
+
 	for _, node := range nodes {
-		cpuFraction, memFraction := computeCPUMemFraction(cs, node, requestedResource)
+		cpuFraction, memFraction, _, _ := computeCPUMemFraction(cs, node, requestedResource)
 		cpuFractionMap[node.Name] = cpuFraction
 		memFractionMap[node.Name] = memFraction
 		if cpuFraction > maxCPUFraction {
@@ -494,6 +519,7 @@
 			maxMemFraction = memFraction
 		}
 	}
+
 	// we need the max one to keep the same cpu/mem use rate
 	ratio = math.Max(maxCPUFraction, maxMemFraction)
 	for _, node := range nodes {
@@ -510,7 +536,8 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 		memFraction := memFractionMap[node.Name]
 		needCreateResource[v1.ResourceCPU] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
 
-		needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)), resource.BinarySI)
+		// add crioMinMemLimit to ensure that all pods are setting at least that much for a limit, while keeping the same ratios
+		needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)+float64(crioMinMemLimit)), resource.BinarySI)
 
 		podConfig := &pausePodConfig{
 			Name: "",
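
The memory request computed above is the gap between a node's current memory fraction and the target ratio, scaled to the node's allocatable memory, plus crioMinMemLimit so the pod's limit never falls below cri-o's floor. A rough worked example outside the diff, with illustrative numbers (16 GiB allocatable, 40% already requested, cluster-wide maximum ratio 0.5):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// Same 12 MiB floor as the crioMinMemLimit variable in the diff.
const crioMinMemLimit = 12 * 1024 * 1024

func main() {
	// Illustrative values only.
	var (
		memAllocatableVal = int64(16 * 1024 * 1024 * 1024) // 16 GiB allocatable
		memFraction       = 0.40                           // memory already requested on this node
		ratio             = 0.50                           // highest fraction seen across all nodes
	)

	// Mirrors the expression in the hunk above: top up to the target ratio, then add the floor.
	need := int64((ratio-memFraction)*float64(memAllocatableVal) + float64(crioMinMemLimit))
	q := resource.NewQuantity(need, resource.BinarySI)

	// (0.50-0.40)*16Gi + 12Mi ≈ 1.61 GiB for this node's balancing pod.
	fmt.Println(need, q.String())
}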
@@ -550,7 +577,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n
 	return cleanUp, nil
 }
 
-func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) {
+func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64, int64, int64) {
 	framework.Logf("ComputeCPUMemFraction for node: %v", node.Name)
 	totalRequestedCPUResource := resource.Requests.Cpu().MilliValue()
 	totalRequestedMemResource := resource.Requests.Memory().Value()
@@ -589,7 +616,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re
 	framework.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
 	framework.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
 
-	return cpuFraction, memFraction
+	return cpuFraction, memFraction, cpuAllocatableMil, memAllocatableVal
 }
 
 func getNonZeroRequests(pod *v1.Pod) Resource {
