diff --git a/pkg/scheduler/plugins/deviceshare/plugin_test.go b/pkg/scheduler/plugins/deviceshare/plugin_test.go
index c710259fa..08d280309 100644
--- a/pkg/scheduler/plugins/deviceshare/plugin_test.go
+++ b/pkg/scheduler/plugins/deviceshare/plugin_test.go
@@ -2058,7 +2058,7 @@ func Test_Plugin_FilterReservation(t *testing.T) {
 	assert.True(t, status.IsSuccess())
 
 	status = pl.FilterReservation(context.TODO(), cycleState, pod, reservationInfo, "test-node-1")
-	assert.Equal(t, framework.NewStatus(framework.Unschedulable, "node(s) no reservation(s) to meet the device requirements"), status)
+	assert.Equal(t, framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient gpu devices"), status)
 }
 
 func Test_Plugin_Reserve(t *testing.T) {
diff --git a/pkg/scheduler/plugins/deviceshare/reservation.go b/pkg/scheduler/plugins/deviceshare/reservation.go
index ab18c0a86..2bae4e187 100644
--- a/pkg/scheduler/plugins/deviceshare/reservation.go
+++ b/pkg/scheduler/plugins/deviceshare/reservation.go
@@ -198,6 +198,7 @@ func (p *Plugin) tryAllocateFromReservation(
 
 	basicPreemptible = appendAllocated(nil, basicPreemptible, restoreState.mergedMatchedAllocated)
 
+	var reservationReasons []*framework.Status
 	for _, alloc := range matchedReservations {
 		rInfo := alloc.rInfo
 		preemptibleInRR := state.preemptibleInRRs[node.Name][rInfo.UID()]
@@ -222,6 +223,7 @@ func (p *Plugin) tryAllocateFromReservation(
 				hasSatisfiedReservation = true
 				break
 			}
+			reservationReasons = append(reservationReasons, status)
 		} else if allocatePolicy == schedulingv1alpha1.ReservationAllocatePolicyRestricted {
 			_, status := allocator.Allocate(preferred, preferred, nil, preemptible)
 			if status.IsSuccess() {
@@ -236,15 +238,26 @@ func (p *Plugin) tryAllocateFromReservation(
 				hasSatisfiedReservation = true
 				break
 			}
+			reservationReasons = append(reservationReasons, status)
 		}
 	}
 	if !hasSatisfiedReservation && requiredFromReservation {
-		return nil, framework.NewStatus(framework.Unschedulable, "node(s) no reservation(s) to meet the device requirements")
+		return nil, framework.NewStatus(framework.Unschedulable, p.makeReasonsByReservation(reservationReasons)...)
 	}
 	return result, nil
 }
 
+func (p *Plugin) makeReasonsByReservation(reservationReasons []*framework.Status) []string {
+	var reasons []string
+	for _, status := range reservationReasons {
+		for _, r := range status.Reasons() {
+			reasons = append(reasons, fmt.Sprintf("Reservation(s) %s", r))
+		}
+	}
+	return reasons
+}
+
 // scoreWithReservation combine the reservation with the node's resource usage to calculate the reservation score.
 func (p *Plugin) scoreWithReservation(
 	allocator *AutopilotAllocator,
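The deviceshare change above replaces the catch-all "no reservation(s) to meet the device requirements" message with the allocator's concrete failure reasons, each prefixed with "Reservation(s)". A minimal standalone sketch of that prefixing behavior — `prefixReasons` is a hypothetical name, and plain string slices stand in for the real `*framework.Status` values:

```go
package main

import "fmt"

// prefixReasons mirrors makeReasonsByReservation above: every reason collected
// from a failed reservation allocation is prefixed with "Reservation(s)" so the
// final pod event distinguishes reservation-level failures from node-level ones.
func prefixReasons(reservationReasons [][]string) []string {
	var reasons []string
	for _, statusReasons := range reservationReasons {
		for _, r := range statusReasons {
			reasons = append(reasons, fmt.Sprintf("Reservation(s) %s", r))
		}
	}
	return reasons
}

func main() {
	fmt.Println(prefixReasons([][]string{{"Insufficient gpu devices"}}))
	// Output: [Reservation(s) Insufficient gpu devices]
}
```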
diff --git a/pkg/scheduler/plugins/deviceshare/reservation_test.go b/pkg/scheduler/plugins/deviceshare/reservation_test.go
index 9e3bebf6a..0febc137a 100644
--- a/pkg/scheduler/plugins/deviceshare/reservation_test.go
+++ b/pkg/scheduler/plugins/deviceshare/reservation_test.go
@@ -523,7 +523,7 @@ func Test_tryAllocateFromReservation(t *testing.T) {
 			},
 			requiredFromReservation: true,
 			wantResult:              nil,
-			wantStatus:              framework.NewStatus(framework.Unschedulable, "node(s) no reservation(s) to meet the device requirements"),
+			wantStatus:              framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient gpu devices"),
 		},
 		{
 			name: "failed to allocate from Aligned policy reservation that remaining little not fits request",
@@ -566,7 +566,7 @@ func Test_tryAllocateFromReservation(t *testing.T) {
 			},
 			requiredFromReservation: true,
 			wantResult:              nil,
-			wantStatus:              framework.NewStatus(framework.Unschedulable, "node(s) no reservation(s) to meet the device requirements"),
+			wantStatus:              framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient gpu devices"),
 		},
 		{
 			name: "allocate from Restricted policy reservation",
@@ -650,7 +650,7 @@ func Test_tryAllocateFromReservation(t *testing.T) {
 			},
 			requiredFromReservation: true,
 			wantResult:              nil,
-			wantStatus:              framework.NewStatus(framework.Unschedulable, "node(s) no reservation(s) to meet the device requirements"),
+			wantStatus:              framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient gpu devices"),
 		},
 	}
diff --git a/pkg/scheduler/plugins/reservation/plugin.go b/pkg/scheduler/plugins/reservation/plugin.go
index 913956109..8ce6b4a6c 100644
--- a/pkg/scheduler/plugins/reservation/plugin.go
+++ b/pkg/scheduler/plugins/reservation/plugin.go
@@ -19,6 +19,8 @@ package reservation
 import (
 	"context"
 	"fmt"
+	"strconv"
+	"strings"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -152,14 +154,16 @@ type stateData struct {
 	preemptible      map[string]corev1.ResourceList
 	preemptibleInRRs map[string]map[types.UID]corev1.ResourceList
 
-	nodeReservationStates map[string]nodeReservationState
-	preferredNode         string
-	assumed               *frameworkext.ReservationInfo
+	nodeReservationStates    map[string]nodeReservationState
+	nodeReservationDiagnosis map[string]nodeDiagnosisState
+	preferredNode            string
+	assumed                  *frameworkext.ReservationInfo
 }
 
 type nodeReservationState struct {
 	nodeName string
 	matched  []*frameworkext.ReservationInfo
+
 	// podRequested represents all Pods(including matched reservation) requested resources
 	// but excluding the already allocated from unmatched reservations
 	podRequested *framework.Resource
@@ -167,14 +171,22 @@ type nodeReservationState struct {
 	rAllocated *framework.Resource
 }
 
+type nodeDiagnosisState struct {
+	nodeName                 string
+	ownerMatched             int // owner matched
+	isUnschedulableUnmatched int // owner matched but unmatched due to unschedulable
+	affinityUnmatched        int // owner matched but unmatched due to affinity
+}
+
 func (s *stateData) Clone() framework.StateData {
 	ns := &stateData{
-		hasAffinity:           s.hasAffinity,
-		podRequests:           s.podRequests,
-		podRequestsResources:  s.podRequestsResources,
-		nodeReservationStates: s.nodeReservationStates,
-		preferredNode:         s.preferredNode,
-		assumed:               s.assumed,
+		hasAffinity:              s.hasAffinity,
+		podRequests:              s.podRequests,
+		podRequestsResources:     s.podRequestsResources,
+		nodeReservationStates:    s.nodeReservationStates,
+		nodeReservationDiagnosis: s.nodeReservationDiagnosis,
+		preferredNode:            s.preferredNode,
+		assumed:                  s.assumed,
 	}
 	preemptible := map[string]corev1.ResourceList{}
 	for nodeName, returned := range s.preemptible {
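The new `nodeDiagnosisState` records, per node, how many reservations matched the pod's owner selectors and why owner-matched ones were still skipped. A standalone sketch of the counting rule applied later in `prepareMatchReservationState` (unschedulable takes precedence over an affinity mismatch); the `count` helper and the boolean input flags are hypothetical condensations of the real checks:

```go
package main

import "fmt"

type diagnosis struct {
	ownerMatched             int
	isUnschedulableUnmatched int
	affinityUnmatched        int
}

// count sketches the bookkeeping added in the transformer below: only
// owner-matched reservations are counted at all, and an unschedulable
// reservation lands in the unschedulable bucket even if its affinity
// also fails to match.
func count(reservations []struct{ ownerMatched, unschedulable, affinityOK bool }) diagnosis {
	var d diagnosis
	for _, r := range reservations {
		if !r.ownerMatched {
			continue
		}
		d.ownerMatched++
		if r.unschedulable {
			d.isUnschedulableUnmatched++
		} else if !r.affinityOK {
			d.affinityUnmatched++
		}
	}
	return d
}

func main() {
	fmt.Println(count([]struct{ ownerMatched, unschedulable, affinityOK bool }{
		{ownerMatched: true, unschedulable: true, affinityOK: false},
		{ownerMatched: true, unschedulable: false, affinityOK: false},
		{ownerMatched: false},
	})) // {2 1 1}
}
```

Note that `Clone()` copies the diagnosis map by reference, which is consistent with the other per-node maps: the state is written once while restoring reservations and only read afterwards.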
@@ -382,7 +394,7 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 
 	var hasSatisfiedReservation bool
 	allInsufficientResourcesByNode := sets.NewString()
-	allInsufficientResourcesByReservation := sets.NewString()
+	var allInsufficientResourcesByReservation []string
 	for _, rInfo := range matchedReservations {
 		resourceNames := quotav1.Intersection(rInfo.ResourceNames, podRequestsResourceNames)
 		if len(resourceNames) == 0 {
@@ -417,7 +429,7 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 			}
 			allInsufficientResourcesByNode.Insert(insufficientResourcesByNode...)
 			for _, insufficientResourceByReservation := range insufficientResourcesByReservation {
-				allInsufficientResourcesByReservation.Insert(string(insufficientResourceByReservation))
+				allInsufficientResourcesByReservation = append(allInsufficientResourcesByReservation, string(insufficientResourceByReservation))
 			}
 		}
 	}
@@ -428,8 +440,8 @@ func (pl *Plugin) filterWithReservations(ctx context.Context, cycleState *framew
 		for insufficientResourceByNode := range allInsufficientResourcesByNode {
 			failureReasons = append(failureReasons, fmt.Sprintf("Insufficient %s by node", insufficientResourceByNode))
 		}
-		for insufficientResourceByReservation := range allInsufficientResourcesByReservation {
-			failureReasons = append(failureReasons, fmt.Sprintf("Insufficient %s by reservation", insufficientResourceByReservation))
+		for _, insufficientResourceByReservation := range allInsufficientResourcesByReservation {
+			failureReasons = append(failureReasons, fmt.Sprintf("Reservation(s) Insufficient %s", insufficientResourceByReservation))
 		}
 		if len(failureReasons) == 0 {
 			failureReasons = append(failureReasons, ErrReasonNoReservationsMeetRequirements)
 		}
@@ -497,7 +509,42 @@ func fitsNode(podRequest *framework.Resource, nodeInfo *framework.NodeInfo, node
 
 func (pl *Plugin) PostFilter(ctx context.Context, cycleState *framework.CycleState, pod *corev1.Pod, _ framework.NodeToStatusMap) (*framework.PostFilterResult, *framework.Status) {
 	// Implement an empty function to be compatible with existing configurations
-	return nil, framework.NewStatus(framework.Unschedulable)
+	state := getStateData(cycleState)
+	reasons := pl.makePostFilterReasons(state)
+	return nil, framework.NewStatus(framework.Unschedulable, reasons...)
+}
+
+func (pl *Plugin) makePostFilterReasons(state *stateData) []string {
+	ownerMatched, affinityUnmatched, isUnSchedulableUnmatched := 0, 0, 0
+	for _, nodeState := range state.nodeReservationDiagnosis {
+		isUnSchedulableUnmatched += nodeState.isUnschedulableUnmatched
+		affinityUnmatched += nodeState.affinityUnmatched
+		ownerMatched += nodeState.ownerMatched
+	}
+	if ownerMatched <= 0 && !state.hasAffinity {
+		return nil
+	}
+
+	// Make the error messages: The framework does not aggregate the same reasons for the PostFilter, so we need
+	// to prepare the exact messages by ourselves.
+	var reasons []string
+	var b strings.Builder
+	if affinityUnmatched > 0 {
+		b.WriteString(strconv.Itoa(affinityUnmatched))
+		b.WriteString(" Reservation(s) didn't match affinity rules")
+		reasons = append(reasons, b.String())
+		b.Reset()
+	}
+	if isUnSchedulableUnmatched > 0 {
+		b.WriteString(strconv.Itoa(isUnSchedulableUnmatched))
+		b.WriteString(" Reservation(s) is unschedulable")
+		reasons = append(reasons, b.String())
+		b.Reset()
+	}
+	b.WriteString(strconv.Itoa(ownerMatched))
+	b.WriteString(" Reservation(s) matched owner total")
+	reasons = append(reasons, b.String())
+	return reasons
 }
 
 func (pl *Plugin) FilterReservation(ctx context.Context, cycleState *framework.CycleState, pod *corev1.Pod, reservationInfo *frameworkext.ReservationInfo, nodeName string) *framework.Status {
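`PostFilter` previously returned a bare `Unschedulable` status; it now surfaces the aggregated diagnosis. Because the scheduler framework deduplicates Filter reasons but not PostFilter reasons, the plugin sums the per-node counters itself and renders each summary line exactly once. A standalone sketch with hypothetical per-node counts (`summarize` and `nodeDiagnosis` are illustrative stand-ins, mirroring the `TestPostFilter` cases further below):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type nodeDiagnosis struct {
	ownerMatched, isUnschedulableUnmatched, affinityUnmatched int
}

// summarize mirrors makePostFilterReasons: counts are summed across nodes,
// then each non-zero bucket is rendered once, followed by the owner-matched
// total so users can see how many reservations were candidates at all.
func summarize(states map[string]nodeDiagnosis, hasAffinity bool) []string {
	ownerMatched, affinityUnmatched, unschedulable := 0, 0, 0
	for _, s := range states {
		ownerMatched += s.ownerMatched
		affinityUnmatched += s.affinityUnmatched
		unschedulable += s.isUnschedulableUnmatched
	}
	if ownerMatched <= 0 && !hasAffinity {
		return nil
	}
	var reasons []string
	var b strings.Builder
	if affinityUnmatched > 0 {
		b.WriteString(strconv.Itoa(affinityUnmatched))
		b.WriteString(" Reservation(s) didn't match affinity rules")
		reasons = append(reasons, b.String())
		b.Reset()
	}
	if unschedulable > 0 {
		b.WriteString(strconv.Itoa(unschedulable))
		b.WriteString(" Reservation(s) is unschedulable")
		reasons = append(reasons, b.String())
		b.Reset()
	}
	b.WriteString(strconv.Itoa(ownerMatched))
	b.WriteString(" Reservation(s) matched owner total")
	reasons = append(reasons, b.String())
	return reasons
}

func main() {
	fmt.Println(summarize(map[string]nodeDiagnosis{
		"node-0": {ownerMatched: 3, affinityUnmatched: 3},
		"node-1": {ownerMatched: 2, isUnschedulableUnmatched: 1, affinityUnmatched: 1},
	}, false))
	// [4 Reservation(s) didn't match affinity rules 1 Reservation(s) is unschedulable 5 Reservation(s) matched owner total]
}
```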
diff --git a/pkg/scheduler/plugins/reservation/plugin_test.go b/pkg/scheduler/plugins/reservation/plugin_test.go
index e2d75fd9a..ecc1acfba 100644
--- a/pkg/scheduler/plugins/reservation/plugin_test.go
+++ b/pkg/scheduler/plugins/reservation/plugin_test.go
@@ -870,7 +870,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient cpu by reservation"),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient cpu"),
 		},
 		{
 			name: "filter default reservations with preemption",
@@ -1160,7 +1160,74 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient cpu by reservation"),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient cpu"),
+		},
+		{
+			name: "failed to filter multiple restricted reservations with preempt from node",
+			stateData: &stateData{
+				hasAffinity: true,
+				podRequests: corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("4"),
+				},
+				podRequestsResources: &framework.Resource{
+					MilliCPU: 4 * 1000,
+				},
+				preemptible: map[string]corev1.ResourceList{
+					node.Name: {
+						corev1.ResourceCPU: resource.MustParse("4"),
+					},
+				},
+				preemptibleInRRs: nil,
+				nodeReservationStates: map[string]nodeReservationState{
+					node.Name: {
+						podRequested: &framework.Resource{
+							MilliCPU: 38 * 1000,
+						},
+						rAllocated: &framework.Resource{
+							MilliCPU: 10000,
+						},
+						matched: []*frameworkext.ReservationInfo{
+							{
+								Reservation: &schedulingv1alpha1.Reservation{
+									ObjectMeta: metav1.ObjectMeta{
+										Name: "test-r",
+										UID:  "123456",
+									},
+									Spec: schedulingv1alpha1.ReservationSpec{
+										AllocatePolicy: schedulingv1alpha1.ReservationAllocatePolicyRestricted,
+									},
+								},
+								ResourceNames: []corev1.ResourceName{corev1.ResourceCPU},
+								Allocatable: corev1.ResourceList{
+									corev1.ResourceCPU: resource.MustParse("6"),
+								},
+								Allocated: corev1.ResourceList{
+									corev1.ResourceCPU: resource.MustParse("6"),
+								},
+							},
+							{
+								Reservation: &schedulingv1alpha1.Reservation{
+									ObjectMeta: metav1.ObjectMeta{
+										Name: "test-r-1",
+										UID:  "7891011",
+									},
+									Spec: schedulingv1alpha1.ReservationSpec{
+										AllocatePolicy: schedulingv1alpha1.ReservationAllocatePolicyRestricted,
+									},
+								},
+								ResourceNames: []corev1.ResourceName{corev1.ResourceCPU},
+								Allocatable: corev1.ResourceList{
+									corev1.ResourceCPU: resource.MustParse("4"),
+								},
+								Allocated: corev1.ResourceList{
+									corev1.ResourceCPU: resource.MustParse("1"),
+								},
+							},
+						},
+					},
+				},
+			},
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient cpu", "Reservation(s) Insufficient cpu"),
 		},
 		{
 			name: "failed to filter restricted reservations with preempt from reservation and node",
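Note the expected status in the new test case above: "Reservation(s) Insufficient cpu" appears twice, once per failing Restricted reservation. That is deliberate — `filterWithReservations` switched from `sets.NewString()` to a plain slice so per-reservation failures are no longer collapsed into one message. A minimal sketch of the behavioral difference, using a map as a stand-in for the string set:

```go
package main

import "fmt"

// Collecting reasons into a set collapses identical messages coming from
// different reservations; a slice keeps one entry per failing reservation,
// which is what the updated aggregation reports.
func main() {
	perReservationFailures := []string{"Insufficient cpu", "Insufficient cpu"}

	set := map[string]struct{}{} // old behavior: sets.NewString()
	for _, r := range perReservationFailures {
		set[r] = struct{}{}
	}
	fmt.Println(len(set)) // 1 — only one reason survives

	var slice []string // new behavior
	for _, r := range perReservationFailures {
		slice = append(slice, "Reservation(s) "+r)
	}
	fmt.Println(slice) // [Reservation(s) Insufficient cpu Reservation(s) Insufficient cpu]
}
```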
@@ -1217,7 +1284,7 @@ func Test_filterWithReservations(t *testing.T) {
 					},
 				},
 			},
-			wantStatus: framework.NewStatus(framework.Unschedulable, "Insufficient cpu by reservation"),
+			wantStatus: framework.NewStatus(framework.Unschedulable, "Reservation(s) Insufficient cpu"),
 		},
 		{
 			name: "filter restricted reservations with preempt from reservation and node",
@@ -1766,6 +1833,94 @@ func TestFilterReservation(t *testing.T) {
 	}
 }
 
+func TestPostFilter(t *testing.T) {
+	type args struct {
+		hasStateData             bool
+		hasAffinity              bool
+		nodeReservationDiagnosis map[string]nodeDiagnosisState
+	}
+	tests := []struct {
+		name  string
+		args  args
+		want  *framework.PostFilterResult
+		want1 *framework.Status
+	}{
+		{
+			name: "no reservation filtering",
+			args: args{
+				hasStateData: false,
+			},
+			want:  nil,
+			want1: framework.NewStatus(framework.Unschedulable),
+		},
+		{
+			name: "show reservation owner matched when reservation affinity specified",
+			args: args{
+				hasStateData:             true,
+				hasAffinity:              true,
+				nodeReservationDiagnosis: map[string]nodeDiagnosisState{},
+			},
+			want:  nil,
+			want1: framework.NewStatus(framework.Unschedulable, "0 Reservation(s) matched owner total"),
+		},
+		{
+			name: "show reservation owner matched, unschedulable unmatched",
+			args: args{
+				hasStateData: true,
+				nodeReservationDiagnosis: map[string]nodeDiagnosisState{
+					"test-node-0": {
+						ownerMatched:             3,
+						isUnschedulableUnmatched: 3,
+						affinityUnmatched:        0,
+					},
+					"test-node-1": {
+						ownerMatched:             1,
+						isUnschedulableUnmatched: 1,
+						affinityUnmatched:        0,
+					},
+				},
+			},
+			want:  nil,
+			want1: framework.NewStatus(framework.Unschedulable, "4 Reservation(s) is unschedulable", "4 Reservation(s) matched owner total"),
+		},
+		{
+			name: "show reservation owner matched, unschedulable and affinity unmatched",
+			args: args{
+				hasStateData: true,
+				nodeReservationDiagnosis: map[string]nodeDiagnosisState{
+					"test-node-0": {
+						ownerMatched:             3,
+						isUnschedulableUnmatched: 0,
+						affinityUnmatched:        3,
+					},
+					"test-node-1": {
+						ownerMatched:             2,
+						isUnschedulableUnmatched: 1,
+						affinityUnmatched:        1,
+					},
+				},
+			},
+			want:  nil,
+			want1: framework.NewStatus(framework.Unschedulable, "4 Reservation(s) didn't match affinity rules", "1 Reservation(s) is unschedulable", "5 Reservation(s) matched owner total"),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			pl := &Plugin{}
+			cycleState := framework.NewCycleState()
+			if tt.args.hasStateData {
+				cycleState.Write(stateKey, &stateData{
+					hasAffinity:              tt.args.hasAffinity,
+					nodeReservationDiagnosis: tt.args.nodeReservationDiagnosis,
+				})
+			}
+			got, got1 := pl.PostFilter(context.TODO(), cycleState, nil, nil)
+			assert.Equal(t, tt.want, got)
+			assert.Equal(t, tt.want1, got1)
+		})
+	}
+}
+
 func TestReserve(t *testing.T) {
 	reservation2C4G := &schedulingv1alpha1.Reservation{
 		ObjectMeta: metav1.ObjectMeta{
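The transformer change below fills pre-sized per-node slices from parallel workers: each worker that has something to record reserves a slot with `atomic.AddInt32` and the slices are truncated to the final index afterwards, which is why `diagnosisIndex` joins the existing `stateIndex`. A standalone sketch of that pattern with hypothetical worker input:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	inputs := []int{1, 2, 3, 4, 5, 6}
	results := make([]int, len(inputs)) // pre-sized: one potential slot per input
	var next int32
	var wg sync.WaitGroup
	for _, in := range inputs {
		in := in
		wg.Add(1)
		go func() {
			defer wg.Done()
			if in%2 != 0 {
				return // nothing to record for this input
			}
			idx := atomic.AddInt32(&next, 1)
			results[idx-1] = in // idx-1 is this worker's reserved slot
		}()
	}
	wg.Wait()
	results = results[:next] // drop the unused tail
	fmt.Println(results)     // e.g. [4 2 6] — slot order depends on scheduling
}
```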
diff --git a/pkg/scheduler/plugins/reservation/transformer.go b/pkg/scheduler/plugins/reservation/transformer.go
index 7592ca88b..6b3264f1a 100644
--- a/pkg/scheduler/plugins/reservation/transformer.go
+++ b/pkg/scheduler/plugins/reservation/transformer.go
@@ -61,9 +61,10 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *
 	}
 	requiredNodeAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
 
-	var stateIndex int32
+	var stateIndex, diagnosisIndex int32
 	allNodes := pl.reservationCache.listAllNodes()
 	allNodeReservationStates := make([]*nodeReservationState, len(allNodes))
+	allNodeDiagnosisStates := make([]*nodeDiagnosisState, len(allNodes))
 	allPluginToRestoreState := make([]frameworkext.PluginToReservationRestoreStates, len(allNodes))
 	isReservedPod := reservationutil.IsReservePod(pod)
@@ -100,6 +101,12 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *
 		}
 
 		var unmatched, matched []*frameworkext.ReservationInfo
+		diagnosisState := &nodeDiagnosisState{
+			nodeName:                 node.Name,
+			ownerMatched:             0,
+			isUnschedulableUnmatched: 0,
+			affinityUnmatched:        0,
+		}
 		status := pl.reservationCache.forEachAvailableReservationOnNode(node.Name, func(rInfo *frameworkext.ReservationInfo) (bool, *framework.Status) {
 			if !rInfo.IsAvailable() || rInfo.ParseError != nil {
 				return true, nil
@@ -111,12 +118,24 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *
 				return true, nil
 			}
 
-			if !isReservedPod && !rInfo.IsUnschedulable() && matchReservation(pod, node, rInfo, reservationAffinity) {
+			isOwnerMatched := rInfo.Match(pod)
+			isUnschedulable := rInfo.IsUnschedulable()
+			isMatchReservationAffinity := matchReservationAffinity(node, rInfo, reservationAffinity)
+			if !isReservedPod && !isUnschedulable && isOwnerMatched && isMatchReservationAffinity {
 				matched = append(matched, rInfo.Clone())
 			} else if len(rInfo.AssignedPods) > 0 {
 				unmatched = append(unmatched, rInfo.Clone())
 			}
 
+			if isOwnerMatched { // count owner-matched diagnosis state
+				diagnosisState.ownerMatched++
+				if isUnschedulable {
+					diagnosisState.isUnschedulableUnmatched++
+				} else if !isMatchReservationAffinity {
+					diagnosisState.affinityUnmatched++
+				}
+			}
+
 			return true, nil
 		})
 		if !status.IsSuccess() {
@@ -126,6 +145,11 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *
 			return
 		}
 
+		if diagnosisState.ownerMatched > 0 {
+			idx := atomic.AddInt32(&diagnosisIndex, 1)
+			allNodeDiagnosisStates[idx-1] = diagnosisState
+		}
+
 		if len(matched) == 0 && len(unmatched) == 0 {
 			return
 		}
@@ -199,19 +223,22 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *
 
 	allNodeReservationStates = allNodeReservationStates[:stateIndex]
 	allPluginToRestoreState = allPluginToRestoreState[:stateIndex]
+	allNodeDiagnosisStates = allNodeDiagnosisStates[:diagnosisIndex]
 	podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{})
 	podRequestResources := framework.NewResource(podRequests)
 	state := &stateData{
-		hasAffinity:           reservationAffinity != nil,
-		podRequests:           podRequests,
-		podRequestsResources:  podRequestResources,
-		preemptible:           map[string]corev1.ResourceList{},
-		preemptibleInRRs:      map[string]map[types.UID]corev1.ResourceList{},
-		nodeReservationStates: map[string]nodeReservationState{},
+		hasAffinity:              reservationAffinity != nil,
+		podRequests:              podRequests,
+		podRequestsResources:     podRequestResources,
+		preemptible:              map[string]corev1.ResourceList{},
+		preemptibleInRRs:         map[string]map[types.UID]corev1.ResourceList{},
+		nodeReservationStates:    map[string]nodeReservationState{},
+		nodeReservationDiagnosis: map[string]nodeDiagnosisState{},
 	}
 	pluginToNodeReservationRestoreState := frameworkext.PluginToNodeReservationRestoreStates{}
-	for index, v := range allNodeReservationStates {
+	for index := range allNodeReservationStates {
+		v := allNodeReservationStates[index]
 		state.nodeReservationStates[v.nodeName] = *v
 		for pluginName, pluginState := range allPluginToRestoreState[index] {
 			if pluginState == nil {
@@ -225,6 +252,10 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *
 			nodeRestoreStates[v.nodeName] = pluginState
 		}
 	}
+	for i := range allNodeDiagnosisStates {
+		v := allNodeDiagnosisStates[i]
+		state.nodeReservationDiagnosis[v.nodeName] = *v
+	}
 	if extender != nil {
 		status := extender.RunReservationExtensionFinalRestoreReservation(ctx, cycleState, pod, pluginToNodeReservationRestoreState)
 		if !status.IsSuccess() {
@@ -347,11 +378,7 @@ func calculateResource(pod *corev1.Pod) (res framework.Resource, non0CPU int64,
 	return
 }
 
-func matchReservation(pod *corev1.Pod, node *corev1.Node, reservation *frameworkext.ReservationInfo, reservationAffinity *reservationutil.RequiredReservationAffinity) bool {
-	if !reservation.Match(pod) {
-		return false
-	}
-
+func matchReservationAffinity(node *corev1.Node, reservation *frameworkext.ReservationInfo, reservationAffinity *reservationutil.RequiredReservationAffinity) bool {
 	if reservationAffinity != nil {
 		// NOTE: There are some special scenarios.
 		// For example, the AZ where the Pod wants to select the Reservation is cn-hangzhou, but the Reservation itself
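In the transformer above, owner matching, schedulability, and reservation affinity are now evaluated independently so the skip reason can be attributed precisely, and a reservation that fails any check is still kept as "unmatched" when it has assigned pods so its allocations can be returned to the node's view. A condensed sketch of that branch; `rInfo` here is a hypothetical stand-in for `frameworkext.ReservationInfo` with the three checks reduced to booleans:

```go
package main

import "fmt"

// rInfo condenses rInfo.Match(pod), rInfo.IsUnschedulable(), and
// matchReservationAffinity(node, rInfo, affinity) into plain fields.
type rInfo struct {
	ownerMatched  bool
	unschedulable bool
	affinityOK    bool
	assignedPods  int
}

// classify sketches the branch in prepareMatchReservationState: only a
// reservation passing all three checks is "matched"; a failing reservation
// with assigned pods is kept as "unmatched" so its allocations can still be
// restored; everything else is ignored for this pod.
func classify(r rInfo) string {
	if r.ownerMatched && !r.unschedulable && r.affinityOK {
		return "matched"
	}
	if r.assignedPods > 0 {
		return "unmatched (restore allocations)"
	}
	return "ignored"
}

func main() {
	fmt.Println(classify(rInfo{ownerMatched: true, unschedulable: true, assignedPods: 2}))
	// unmatched (restore allocations)
}
```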
diff --git a/pkg/scheduler/plugins/reservation/transformer_test.go b/pkg/scheduler/plugins/reservation/transformer_test.go
index 21430c38d..0bee83c25 100644
--- a/pkg/scheduler/plugins/reservation/transformer_test.go
+++ b/pkg/scheduler/plugins/reservation/transformer_test.go
@@ -322,6 +322,14 @@ func TestRestoreReservation(t *testing.T) {
 				rAllocated: framework.NewResource(nil),
 			},
 		},
+		nodeReservationDiagnosis: map[string]nodeDiagnosisState{
+			node.Name: {
+				nodeName:                 node.Name,
+				ownerMatched:             1,
+				affinityUnmatched:        0,
+				isUnschedulableUnmatched: 0,
+			},
+		},
 	}
 	assert.Equal(t, expectedStat, getStateData(cycleState))
 
@@ -348,20 +356,12 @@ func TestRestoreReservation(t *testing.T) {
 func Test_matchReservation(t *testing.T) {
 	tests := []struct {
 		name                string
-		pod                 *corev1.Pod
 		reservation         *schedulingv1alpha1.Reservation
 		reservationAffinity *apiext.ReservationAffinity
 		want                bool
 	}{
 		{
-			name: "only match reservation owners",
-			pod: &corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"app": "test",
-					},
-				},
-			},
+			name: "nothing to match",
 			reservation: &schedulingv1alpha1.Reservation{
 				Spec: schedulingv1alpha1.ReservationSpec{
 					Owners: []schedulingv1alpha1.ReservationOwner{
@@ -378,18 +378,48 @@ func Test_matchReservation(t *testing.T) {
 			want: true,
 		},
 		{
-			name: "match reservation owners and match reservation affinity",
-			pod: &corev1.Pod{
+			name: "match reservation affinity",
+			reservation: &schedulingv1alpha1.Reservation{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
-						"app": "test",
+						"reservation-type": "reservation-test",
+					},
+				},
+				Spec: schedulingv1alpha1.ReservationSpec{
+					Owners: []schedulingv1alpha1.ReservationOwner{
+						{
+							LabelSelector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{
+									"app": "test",
+								},
+							},
+						},
+					},
+				},
+			},
+			reservationAffinity: &apiext.ReservationAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: &apiext.ReservationAffinitySelector{
+					ReservationSelectorTerms: []corev1.NodeSelectorTerm{
+						{
+							MatchExpressions: []corev1.NodeSelectorRequirement{
+								{
+									Key:      "reservation-type",
+									Operator: corev1.NodeSelectorOpIn,
+									Values:   []string{"reservation-test"},
+								},
+							},
+						},
 					},
 				},
 			},
+			want: true,
+		},
+		{
+			name: "not match reservation affinity",
 			reservation: &schedulingv1alpha1.Reservation{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
-						"reservation-type": "reservation-test",
+						"reservation-type": "reservation-test-not-match",
 					},
 				},
 				Spec: schedulingv1alpha1.ReservationSpec{
@@ -419,23 +449,28 @@ func Test_matchReservation(t *testing.T) {
 					},
 				},
 			},
-			want: true,
+			want: false,
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			pod := &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-pod",
+				},
+			}
 			if tt.reservationAffinity != nil {
 				affinityData, err := json.Marshal(tt.reservationAffinity)
 				assert.NoError(t, err)
-				if tt.pod.Annotations == nil {
-					tt.pod.Annotations = map[string]string{}
+				if pod.Annotations == nil {
+					pod.Annotations = map[string]string{}
 				}
-				tt.pod.Annotations[apiext.AnnotationReservationAffinity] = string(affinityData)
+				pod.Annotations[apiext.AnnotationReservationAffinity] = string(affinityData)
 			}
-			reservationAffinity, err := reservationutil.GetRequiredReservationAffinity(tt.pod)
+			reservationAffinity, err := reservationutil.GetRequiredReservationAffinity(pod)
 			assert.NoError(t, err)
 			rInfo := frameworkext.NewReservationInfo(tt.reservation)
-			got := matchReservation(tt.pod, &corev1.Node{}, rInfo, reservationAffinity)
+			got := matchReservationAffinity(&corev1.Node{}, rInfo, reservationAffinity)
 			assert.Equal(t, tt.want, got)
 		})
 	}
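Taken together, these changes make reservation-related scheduling failures self-explanatory in pod events: node-level shortages, per-reservation shortages, and the owner/affinity/unschedulable diagnosis each appear as distinct reasons. As an illustrative example only (the exact line depends on how the framework joins Filter and PostFilter reasons), a failed pod might report: "0/3 nodes are available: 3 Reservation(s) Insufficient cpu, 4 Reservation(s) didn't match affinity rules, 1 Reservation(s) is unschedulable, 5 Reservation(s) matched owner total."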