diff --git a/pkg/scheduler/plugins/reservation/plugin.go b/pkg/scheduler/plugins/reservation/plugin.go
index 92809f2ea..1d790bcb0 100644
--- a/pkg/scheduler/plugins/reservation/plugin.go
+++ b/pkg/scheduler/plugins/reservation/plugin.go
@@ -54,8 +54,6 @@ const (
 	ErrReasonReservationAllocatePolicyConflict = "node(s) reservation allocate policy conflict"
 	// ErrReasonReservationInactive is the reason for the reservation is failed/succeeded and should not be used.
 	ErrReasonReservationInactive = "reservation is not active"
-	// ErrReasonNoReservationsMeetRequirements is the reason for no reservation(s) to meet the requirements.
-	ErrReasonNoReservationsMeetRequirements = "node(s) no reservation(s) to meet the requirements"
 	// ErrReasonPreemptionFailed is the reason for preemption failed
 	ErrReasonPreemptionFailed = "node(s) preemption failed due to insufficient resources"
 )
@@ -359,8 +357,8 @@ func (pl *Plugin) Filter(ctx context.Context, cycleState *framework.CycleState,
 	if len(state.preemptible[node.Name]) > 0 || len(state.preemptibleInRRs[node.Name]) > 0 {
 		preemptible := state.preemptible[node.Name]
 		preemptibleResource := framework.NewResource(preemptible)
-		nodeFits := fitsNode(state.podRequestsResources, nodeInfo, &nodeRState, nil, preemptibleResource)
-		if !nodeFits {
+		insufficientResources := fitsNode(state.podRequestsResources, nodeInfo, &nodeRState, nil, preemptibleResource)
+		if len(insufficientResources) != 0 {
 			return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonPreemptionFailed)
 		}
 	}
@@ -378,20 +376,22 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 	node := nodeInfo.Node()
 	state := getStateData(cycleState)
 	nodeRState := state.nodeReservationStates[node.Name]
-	podRequests, _ := resourceapi.PodRequestsAndLimits(pod)
-	podRequestsResourceNames := quotav1.ResourceNames(podRequests)
+	podRequestsResourceNames := quotav1.ResourceNames(state.podRequests)
 
 	var hasSatisfiedReservation bool
+	insufficientResources := sets.NewString()
 	for _, rInfo := range matchedReservations {
 		resourceNames := quotav1.Intersection(rInfo.ResourceNames, podRequestsResourceNames)
 		if len(resourceNames) == 0 {
+			insufficientResources.Insert(quotav1.ToSet(podRequestsResourceNames).List()...)
 			continue
 		}
 
 		preemptibleInRR := state.preemptibleInRRs[node.Name][rInfo.UID()]
 		preemptible := framework.NewResource(preemptibleInRR)
 		preemptible.Add(state.preemptible[node.Name])
-		nodeFits := fitsNode(state.podRequestsResources, nodeInfo, &nodeRState, rInfo, preemptible)
+		insufficientResourcesByNode := fitsNode(state.podRequestsResources, nodeInfo, &nodeRState, rInfo, preemptible)
+		nodeFits := len(insufficientResourcesByNode) == 0
 		allocatePolicy := rInfo.GetAllocatePolicy()
 		if allocatePolicy == schedulingv1alpha1.ReservationAllocatePolicyDefault ||
 			allocatePolicy == schedulingv1alpha1.ReservationAllocatePolicyAligned {
@@ -399,6 +399,7 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 				hasSatisfiedReservation = true
 				break
 			}
+			insufficientResources.Insert(quotav1.ToSet(insufficientResourcesByNode).List()...)
 		} else if allocatePolicy == schedulingv1alpha1.ReservationAllocatePolicyRestricted {
 			allocated := rInfo.Allocated
 			if len(preemptibleInRR) > 0 {
@@ -407,16 +408,23 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 			allocated = quotav1.Mask(allocated, rInfo.ResourceNames)
 			rRemained := quotav1.SubtractWithNonNegativeResult(rInfo.Allocatable, allocated)
 			requests := quotav1.Mask(state.podRequests, rInfo.ResourceNames)
-			fits, _ := quotav1.LessThanOrEqual(requests, rRemained)
+			fits, insufficientResourcesByReservation := quotav1.LessThanOrEqual(requests, rRemained)
 			if fits && nodeFits {
 				hasSatisfiedReservation = true
 				break
 			}
+			insufficientResources.Insert(quotav1.ToSet(insufficientResourcesByNode).List()...)
+			insufficientResources.Insert((quotav1.ToSet(insufficientResourcesByReservation)).List()...)
 		}
 	}
 	// The Pod requirement must be allocated from Reservation, but currently no Reservation meets the requirement
 	if !hasSatisfiedReservation && requiredFromReservation {
-		return framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements)
+		// We will keep all failure reasons.
+		failureReasons := make([]string, 0, len(insufficientResources))
+		for insufficientResource := range insufficientResources {
+			failureReasons = append(failureReasons, fmt.Sprintf("Insufficient reservation %s", insufficientResource))
+		}
+		return framework.NewStatus(framework.Unschedulable, failureReasons...)
 	}
 	return nil
 }
@@ -424,17 +432,19 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 var dummyResource = framework.NewResource(nil)
 
 // fitsNode checks if node have enough resources to host the pod.
-func fitsNode(podRequest *framework.Resource, nodeInfo *framework.NodeInfo, nodeRState *nodeReservationState, rInfo *frameworkext.ReservationInfo, preemptible *framework.Resource) bool {
+func fitsNode(podRequest *framework.Resource, nodeInfo *framework.NodeInfo, nodeRState *nodeReservationState, rInfo *frameworkext.ReservationInfo, preemptible *framework.Resource) []corev1.ResourceName {
+	insufficientResources := make([]corev1.ResourceName, 0, 4)
+
 	allowedPodNumber := nodeInfo.Allocatable.AllowedPodNumber
 	if len(nodeInfo.Pods)-len(nodeRState.matched)+1 > allowedPodNumber {
-		return false
+		insufficientResources = append(insufficientResources, corev1.ResourcePods)
 	}
 
 	if podRequest.MilliCPU == 0 &&
 		podRequest.Memory == 0 &&
 		podRequest.EphemeralStorage == 0 &&
 		len(podRequest.ScalarResources) == 0 {
-		return true
+		return insufficientResources
 	}
 
 	var rRemained *framework.Resource
@@ -457,22 +467,22 @@ func fitsNode(podRequest *framework.Resource, nodeInfo *framework.NodeInfo, node
 	}
 
 	if podRequest.MilliCPU > (nodeInfo.Allocatable.MilliCPU - (podRequested.MilliCPU - rRemained.MilliCPU - allRAllocated.MilliCPU - preemptible.MilliCPU)) {
-		return false
+		insufficientResources = append(insufficientResources, corev1.ResourceCPU)
 	}
 	if podRequest.Memory > (nodeInfo.Allocatable.Memory - (podRequested.Memory - rRemained.Memory - allRAllocated.Memory - preemptible.Memory)) {
-		return false
+		insufficientResources = append(insufficientResources, corev1.ResourceMemory)
 	}
 	if podRequest.EphemeralStorage > (nodeInfo.Allocatable.EphemeralStorage - (podRequested.EphemeralStorage - rRemained.EphemeralStorage - allRAllocated.EphemeralStorage - preemptible.EphemeralStorage)) {
-		return false
+		insufficientResources = append(insufficientResources, corev1.ResourceEphemeralStorage)
 	}
 
 	for rName, rQuant := range podRequest.ScalarResources {
 		if rQuant > (nodeInfo.Allocatable.ScalarResources[rName] - (podRequested.ScalarResources[rName] - rRemained.ScalarResources[rName] - allRAllocated.ScalarResources[rName] - preemptible.ScalarResources[rName])) {
-			return false
+			insufficientResources = append(insufficientResources, rName)
 		}
 	}
 
-	return true
+	return insufficientResources
 }
 
 func (pl *Plugin) PostFilter(ctx context.Context, cycleState *framework.CycleState, pod *corev1.Pod, _ framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) {
diff --git a/pkg/scheduler/plugins/reservation/plugin_test.go b/pkg/scheduler/plugins/reservation/plugin_test.go
index d068b1402..e63124e27 100644
--- a/pkg/scheduler/plugins/reservation/plugin_test.go
+++ b/pkg/scheduler/plugins/reservation/plugin_test.go
@@ -772,7 +772,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient reservation cpu"),
 		},
 		{
 			name: "filter restricted reservation with nodeInfo",
@@ -861,7 +861,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient reservation cpu"),
 		},
 		{
 			name: "filter default reservations with preemption",
@@ -962,6 +962,9 @@ func Test_filterWithReservations(t *testing.T) {
 			name: "failed to filter default reservations with preempt from reservation",
 			stateData: &stateData{
 				hasAffinity: true,
+				podRequests: corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("4"),
+				},
 				podRequestsResources: &framework.Resource{
 					MilliCPU: 4 * 1000,
 				},
@@ -1002,12 +1005,15 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient reservation cpu"),
 		},
 		{
 			name: "failed to filter default reservations with preempt from node",
 			stateData: &stateData{
 				hasAffinity: true,
+				podRequests: corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("4"),
+				},
 				podRequestsResources: &framework.Resource{
 					MilliCPU: 4 * 1000,
 				},
@@ -1046,7 +1052,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient reservation cpu"),
 		},
 		{
 			name: "filter restricted reservations with preempt from reservation",
@@ -1142,7 +1148,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient reservation cpu"),
 		},
 		{
 			name: "failed to filter restricted reservations with preempt from reservation and node",
@@ -1199,7 +1205,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient reservation cpu"),
 		},
 		{
 			name: "filter restricted reservations with preempt from reservation and node",
@@ -1673,7 +1679,7 @@ func TestFilterReservation(t *testing.T) {
 				reservation4C8G,
 			},
 			targetReservation: reservation2C4G,
-			wantStatus:        framework.NewStatus(framework.Unschedulable, ErrReasonNoReservationsMeetRequirements),
+			wantStatus:        framework.NewStatus(framework.Unschedulable, "Insufficient reservation ephemeral-storage"),
 		},
 		{
 			name: "failed with allocateOnce and allocated reservation",
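
For reference, below is a minimal, self-contained sketch of the failure-reason aggregation that the patched filterWithReservations performs: insufficient resource names reported by the node-fit and reservation-fit checks are deduplicated in a sets.String and rendered as one "Insufficient reservation <name>" message per resource. The sketch is not part of the patch, and the helper name collectFailureReasons is hypothetical. One deliberate difference: the patch ranges over the set directly, which yields the reasons in nondeterministic map order, while List() below returns the keys sorted, so the messages come out in a stable order.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// collectFailureReasons (hypothetical helper) deduplicates the insufficient
// resource names gathered across all matched reservations and formats one
// Unschedulable reason per resource, mirroring the aggregation in the patch.
func collectFailureReasons(insufficient ...[]corev1.ResourceName) []string {
	set := sets.NewString()
	for _, names := range insufficient {
		for _, name := range names {
			set.Insert(string(name))
		}
	}
	// List() returns the set's keys sorted, so the output order is stable.
	reasons := make([]string, 0, set.Len())
	for _, name := range set.List() {
		reasons = append(reasons, fmt.Sprintf("Insufficient reservation %s", name))
	}
	return reasons
}

func main() {
	// e.g. the node check reports cpu and memory, the reservation check cpu.
	byNode := []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory}
	byReservation := []corev1.ResourceName{corev1.ResourceCPU}
	fmt.Println(collectFailureReasons(byNode, byReservation))
	// Output: [Insufficient reservation cpu Insufficient reservation memory]
}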