Support GetPreferredAllocation #443

Open
wants to merge 4 commits into master

Commits on Jul 25, 2023

  1. Add allocator

    diff --git a/pkg/resources/allocator.go b/pkg/resources/allocator.go
    new file mode 100644
    index 00000000..ce9e8019
    --- /dev/null
    +++ b/pkg/resources/allocator.go
    @@ -0,0 +1,112 @@
    +// Copyright 2022 Intel Corp. All Rights Reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//     http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package resources
    +
    +import (
    +	"sort"
    +
    +	"github.com/golang/glog"
    +
    +	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
    +
    +	"github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/types"
    +)
    +
    +// DeviceSet is to hold and manipulate a set of HostDevice
    +type DeviceSet map[string]types.HostDevice
    +
    +// PackedAllocator implements the Allocator interface
    +type PackedAllocator struct {
    +}
    +
    +// NewPackedAllocator create instance of PackedAllocator
    +func NewPackedAllocator() *PackedAllocator {
    +	return &PackedAllocator{}
    +}
    +
    +// NewDeviceSet is to create an empty DeviceSet
    +func NewDeviceSet() DeviceSet {
    +	set := make(DeviceSet)
    +	return set
    +}
    +
    +// Insert is to add a HostDevice in DeviceSet
    +func (ds *DeviceSet) Insert(pciAddr string, device types.HostDevice) {
    +	(*ds)[pciAddr] = device
    +}
    +
    +// Delete is to delete a HostDevice in DeviceSet
    +func (ds *DeviceSet) Delete(pciAddr string) {
    +	delete(*ds, pciAddr)
    +}
    +
    +// AsSortedStrings is to sort the DeviceSet and return the sorted keys
    +func (ds *DeviceSet) AsSortedStrings() []string {
    +	keys := make([]string, 0, len(*ds))
    +	for k := range *ds {
    +		keys = append(keys, k)
    +	}
    +	sort.Strings(keys)
    +	return keys
    +}
    +
    +// Allocate return the preferred allocation
    +func (pa *PackedAllocator) Allocate(rqt *pluginapi.ContainerPreferredAllocationRequest, rp types.ResourcePool) []string {
    +	size := rqt.AllocationSize
    +	preferredDevices := make([]string, 0)
    +
    +	if size <= 0 {
    +		glog.Warningf("Allocator(): requested number of devices are negative. requested: %d", size)
    +		return []string{}
    +	}
    +
    +	if len(rqt.AvailableDeviceIDs) < int(size) {
    +		glog.Warningf("Allocator(): not enough number of devices were available. available: %d, requested: %d", len(rqt.AvailableDeviceIDs), size)
    +		return []string{}
    +	}
    +
    +	if len(rqt.MustIncludeDeviceIDs) > int(size) {
    +		glog.Warningf("Allocator(): allocated number of devices exceeded the number of requested devices. allocated: %d, requested: %d",
    +			len(rqt.MustIncludeDeviceIDs),
    +			size)
    +	}
    +
    +	availableSet := NewDeviceSet()
    +	for _, available := range rqt.AvailableDeviceIDs {
    +		dev, ok := rp.GetDevicePool()[available]
    +		if ok {
    +			availableSet.Insert(available, dev)
    +		} else {
    +			glog.Warningf("Allocator(): not available device id was specified: %s", available)
    +			return []string{}
    +		}
    +	}
    +	for _, required := range rqt.MustIncludeDeviceIDs {
    +		_, ok := rp.GetDevicePool()[required]
    +		if ok {
    +			availableSet.Delete(required)
    +		} else {
    +			glog.Warningf("Allocator(): not available device was included: %s", required)
    +			return []string{}
    +		}
    +	}
    +	sortedAvailableSet := availableSet.AsSortedStrings()
    +
    +	preferredDevices = append(preferredDevices, rqt.MustIncludeDeviceIDs...)
    +	if len(preferredDevices) < int(size) {
    +		preferredDevices = append(preferredDevices, sortedAvailableSet[:int(size)-len(preferredDevices)]...)
    +	}
    +	return preferredDevices
    +}
    diff --git a/pkg/resources/allocator_test.go b/pkg/resources/allocator_test.go
    new file mode 100644
    index 00000000..c2a21053
    --- /dev/null
    +++ b/pkg/resources/allocator_test.go
    @@ -0,0 +1,197 @@
    +package resources_test
    +
    +import (
    +	"reflect"
    +
    +	"github.com/jaypipes/pcidb"
    +
    +	"github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/netdevice"
    +
    +	"github.com/jaypipes/ghw"
    +	pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
    +
    +	"github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/factory"
    +	"github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/resources"
    +	"github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/types"
    +
    +	. "github.com/onsi/ginkgo"
    +	. "github.com/onsi/ginkgo/extensions/table"
    +	. "github.com/onsi/gomega"
    +)
    +
    +var _ = Describe("Allocator", func() {
    +	var (
    +		f  types.ResourceFactory
    +		rc *types.ResourceConfig
    +	)
    +	newPciDeviceFn := func(pciAddr string) *ghw.PCIDevice {
    +		return &ghw.PCIDevice{
    +			Address: pciAddr,
    +			Vendor:  &pcidb.Vendor{ID: ""},
    +			Product: &pcidb.Product{ID: ""},
    +		}
    +	}
    +
    +	BeforeEach(func() {
    +	})
    +	Describe("creating new packed allocator", func() {
    +		Context("with valid policy", func() {
    +			It("should return valid allocator", func() {
    +				packedAllocator := resources.NewPackedAllocator()
    +				expected := &resources.PackedAllocator{}
    +				Expect(reflect.TypeOf(packedAllocator)).To(Equal(reflect.TypeOf(expected)))
    +			})
    +		})
    +	})
    +	Describe("creating new device set", func() {
    +		Context("with no element", func() {
    +			It("should return valid device set", func() {
    +				ds := resources.NewDeviceSet()
    +				expected := make(resources.DeviceSet)
    +				Expect(reflect.TypeOf(ds)).To(Equal(reflect.TypeOf(expected)))
    +			})
    +		})
    +	})
    +	Describe("manipulating device set", func() {
    +		Context("by inserting and deleting elements", func() {
    +			It("should return no error and valid device set", func() {
    +				f = factory.NewResourceFactory("fake", "fake", true)
    +				rc = &types.ResourceConfig{SelectorObj: types.NetDeviceSelectors{}}
    +				ds := resources.NewDeviceSet()
    +				d1, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:00.1"), f, rc)
    +				d2, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:af.0"), f, rc)
    +				d3, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:1b.2"), f, rc)
    +				d4, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:1b.0"), f, rc)
    +
    +				ds.Insert("0000:00:00.1", d1)
    +				ds.Insert("0000:00:af.0", d2)
    +				ds.Insert("0000:00:1b.2", d3)
    +				ds.Insert("0000:00:1b.0", d4)
    +				expectedSet := resources.DeviceSet{
    +					"0000:00:00.1": d1,
    +					"0000:00:af.0": d2,
    +					"0000:00:1b.2": d3,
    +					"0000:00:1b.0": d4,
    +				}
    +				Expect(ds).To(HaveLen(4))
    +				Expect(reflect.DeepEqual(ds, expectedSet)).To(Equal(true))
    +
    +				sortedKeys := ds.AsSortedStrings()
    +				expectedSlice := []string{
    +					"0000:00:00.1",
    +					"0000:00:1b.0",
    +					"0000:00:1b.2",
    +					"0000:00:af.0",
    +				}
    +				Expect(sortedKeys).To(Equal(expectedSlice))
    +
    +				ds.Delete("0000:00:00.1")
    +				ds.Delete("0000:00:af.0")
    +				ds.Delete("0000:00:1b.2")
    +				ds.Delete("0000:00:1b.0")
    +				Expect(ds).To(HaveLen(0))
    +			})
    +		})
    +	})
    +	DescribeTable("allocating with packed allocator",
    +		func(rqt *pluginapi.ContainerPreferredAllocationRequest, expected []string) {
    +			rc = &types.ResourceConfig{SelectorObj: types.NetDeviceSelectors{}}
    +			f = factory.NewResourceFactory("fake", "fake", true)
    +			d1, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:00.1"), f, rc)
    +			d2, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:af.0"), f, rc)
    +			d3, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:1b.2"), f, rc)
    +			d4, _ := netdevice.NewPciNetDevice(newPciDeviceFn("0000:00:1b.0"), f, rc)
    +			rp := resources.NewResourcePool(rc,
    +				map[string]types.HostDevice{
    +					"0000:00:00.1": d1,
    +					"0000:00:af.0": d2,
    +					"0000:00:1b.2": d3,
    +					"0000:00:1b.0": d4,
    +				},
    +			)
    +			pa := resources.NewPackedAllocator()
    +			sortedKeys := pa.Allocate(rqt, rp)
    +			Expect(sortedKeys).To(Equal(expected))
    +		},
    +		Entry("allocating successfully with 3 device IDs",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:00:00.1",
    +					"0000:00:af.0",
    +					"0000:00:1b.2",
    +					"0000:00:1b.0",
    +				},
    +				MustIncludeDeviceIDs: []string{},
    +				AllocationSize:       int32(3),
    +			},
    +			[]string{
    +				"0000:00:00.1",
    +				"0000:00:1b.0",
    +				"0000:00:1b.2",
    +			},
    +		),
    +		Entry("allocating with invalid available device IDs",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:00:00.2",
    +					"0000:00:af.1",
    +				},
    +				MustIncludeDeviceIDs: []string{},
    +				AllocationSize:       int32(1),
    +			},
    +			[]string{},
    +		),
    +		Entry("allocating with invalid must include device IDs",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:00:00.1",
    +					"0000:00:af.0",
    +					"0000:00:1b.2",
    +				},
    +				MustIncludeDeviceIDs: []string{
    +					"0000:00:00.5",
    +					"0000:00:00.6",
    +				},
    +				AllocationSize: int32(2),
    +			},
    +			[]string{},
    +		),
    +		Entry("allocating with invalid size 1",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:00:00.2",
    +					"0000:00:af.1",
    +				},
    +				MustIncludeDeviceIDs: []string{},
    +				AllocationSize:       int32(3),
    +			},
    +			[]string{},
    +		),
    +		Entry("allocating with invalid size 2",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:00:00.2",
    +					"0000:00:af.1",
    +				},
    +				MustIncludeDeviceIDs: []string{},
    +				AllocationSize:       int32(-1),
    +			},
    +			[]string{},
    +		),
    +		Entry("allocating with invalid size 3",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:00:00.2",
    +					"0000:00:af.0",
    +					"0000:00:1b.2",
    +				},
    +				MustIncludeDeviceIDs: []string{
    +					"0000:00:00.2",
    +					"0000:00:af.1",
    +				},
    +				AllocationSize: int32(3),
    +			},
    +			[]string{},
    +		),
    +	)
    +})
    diff --git a/pkg/types/mocks/Allocator.go b/pkg/types/mocks/Allocator.go
    new file mode 100644
    index 00000000..ed06898b
    --- /dev/null
    +++ b/pkg/types/mocks/Allocator.go
    @@ -0,0 +1,45 @@
    +// Code generated by mockery v2.20.2. DO NOT EDIT.
    +
    +package mocks
    +
    +import (
    +	types "github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/types"
    +	mock "github.com/stretchr/testify/mock"
    +	v1beta1 "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
    +)
    +
    +// Allocator is an autogenerated mock type for the Allocator type
    +type Allocator struct {
    +	mock.Mock
    +}
    +
    +// Allocate provides a mock function with given fields: _a0, _a1
    +func (_m *Allocator) Allocate(_a0 *v1beta1.ContainerPreferredAllocationRequest, _a1 types.ResourcePool) []string {
    +	ret := _m.Called(_a0, _a1)
    +
    +	var r0 []string
    +	if rf, ok := ret.Get(0).(func(*v1beta1.ContainerPreferredAllocationRequest, types.ResourcePool) []string); ok {
    +		r0 = rf(_a0, _a1)
    +	} else {
    +		if ret.Get(0) != nil {
    +			r0 = ret.Get(0).([]string)
    +		}
    +	}
    +
    +	return r0
    +}
    +
    +type mockConstructorTestingTNewAllocator interface {
    +	mock.TestingT
    +	Cleanup(func())
    +}
    +
    +// NewAllocator creates a new instance of Allocator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
    +func NewAllocator(t mockConstructorTestingTNewAllocator) *Allocator {
    +	mock := &Allocator{}
    +	mock.Mock.Test(t)
    +
    +	t.Cleanup(func() { mock.AssertExpectations(t) })
    +
    +	return mock
    +}
    diff --git a/pkg/types/mocks/ResourcePool.go b/pkg/types/mocks/ResourcePool.go
    index f3d209c2..1522909e 100644
    --- a/pkg/types/mocks/ResourcePool.go
    +++ b/pkg/types/mocks/ResourcePool.go
    @@ -3,6 +3,7 @@
     package mocks
    
     import (
    +	types "github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/types"
     	mock "github.com/stretchr/testify/mock"
    
     	v1beta1 "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
    @@ -27,6 +28,22 @@ func (_m *ResourcePool) CleanDeviceInfoFile(resourceNamePrefix string) error {
     	return r0
     }
    
    +// GetDevicePool provides a mock function with given fields:
    +func (_m *ResourcePool) GetDevicePool() map[string]types.HostDevice {
    +	ret := _m.Called()
    +
    +	var r0 map[string]types.HostDevice
    +	if rf, ok := ret.Get(0).(func() map[string]types.HostDevice); ok {
    +		r0 = rf()
    +	} else {
    +		if ret.Get(0) != nil {
    +			r0 = ret.Get(0).(map[string]types.HostDevice)
    +		}
    +	}
    +
    +	return r0
    +}
    +
     // GetDeviceSpecs provides a mock function with given fields: deviceIDs
     func (_m *ResourcePool) GetDeviceSpecs(deviceIDs []string) []*v1beta1.DeviceSpec {
     	ret := _m.Called(deviceIDs)
    diff --git a/pkg/types/types.go b/pkg/types/types.go
    index 6fa4099b..f482c9c0 100644
    --- a/pkg/types/types.go
    +++ b/pkg/types/types.go
    @@ -193,6 +193,7 @@ type ResourcePool interface {
     	GetMounts(deviceIDs []string) []*pluginapi.Mount
     	StoreDeviceInfoFile(resourceNamePrefix string) error
     	CleanDeviceInfoFile(resourceNamePrefix string) error
    +	GetDevicePool() map[string]HostDevice // for Allocate
     }
    
     // DeviceProvider provides interface for device discovery
    @@ -299,6 +300,11 @@ type DeviceInfoProvider interface {
     	GetMounts() []*pluginapi.Mount
     }
    
    +// Allocator is an interface to get preferred device allocation
    +type Allocator interface {
    +	Allocate(*pluginapi.ContainerPreferredAllocationRequest, ResourcePool) []string
    +}
    +
     // DeviceSelector provides an interface for filtering a list of devices
     type DeviceSelector interface {
     	Filter([]HostDevice) []HostDevice
    wattmto committed Jul 25, 2023 (commit b2b72fb)
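    The packed allocator above keeps any must-include device IDs first and then fills the remaining slots from the lexicographically sorted available IDs, which tends to group VFs of the same PF. A standalone sketch of that selection step (illustrative names, not the plugin's code; it assumes the must-include IDs are a subset of the available ones):

    package main

    import (
        "fmt"
        "sort"
    )

    // pickPreferred mirrors the Allocate logic above: must-include IDs come first,
    // then the remaining available IDs in sorted order until size is reached.
    func pickPreferred(available, mustInclude []string, size int) []string {
        if size <= 0 || len(available) < size {
            return []string{}
        }
        required := make(map[string]bool, len(mustInclude))
        for _, id := range mustInclude {
            required[id] = true
        }
        rest := make([]string, 0, len(available))
        for _, id := range available {
            if !required[id] {
                rest = append(rest, id)
            }
        }
        sort.Strings(rest)
        preferred := append([]string{}, mustInclude...)
        if len(preferred) < size {
            preferred = append(preferred, rest[:size-len(preferred)]...)
        }
        return preferred
    }

    func main() {
        ids := []string{"0000:00:af.0", "0000:00:00.1", "0000:00:1b.2", "0000:00:1b.0"}
        fmt.Println(pickPreferred(ids, nil, 3)) // [0000:00:00.1 0000:00:1b.0 0000:00:1b.2]
    }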
  2. Add AllocationPolicy

    diff --git a/pkg/resources/pool_stub.go b/pkg/resources/pool_stub.go
    index ca1abd58..490b6c65 100644
    --- a/pkg/resources/pool_stub.go
    +++ b/pkg/resources/pool_stub.go
    @@ -52,6 +52,11 @@ func (rp *ResourcePoolImpl) InitDevice() error {
     	return nil
     }
    
    +// GetAllocationPolicy returns the allocation policy as string
    +func (rp *ResourcePoolImpl) GetAllocationPolicy() string {
    +	return rp.config.AllocationPolicy
    +}
    +
     // GetResourceName returns the resource name as string
     func (rp *ResourcePoolImpl) GetResourceName() string {
     	return rp.config.ResourceName
    diff --git a/pkg/types/mocks/ResourcePool.go b/pkg/types/mocks/ResourcePool.go
    index 1522909e..edc78a1c 100644
    --- a/pkg/types/mocks/ResourcePool.go
    +++ b/pkg/types/mocks/ResourcePool.go
    @@ -28,6 +28,20 @@ func (_m *ResourcePool) CleanDeviceInfoFile(resourceNamePrefix string) error {
     	return r0
     }
    
    +// GetAllocationPolicy provides a mock function with given fields:
    +func (_m *ResourcePool) GetAllocationPolicy() string {
    +	ret := _m.Called()
    +
    +	var r0 string
    +	if rf, ok := ret.Get(0).(func() string); ok {
    +		r0 = rf()
    +	} else {
    +		r0 = ret.Get(0).(string)
    +	}
    +
    +	return r0
    +}
    +
     // GetDevicePool provides a mock function with given fields:
     func (_m *ResourcePool) GetDevicePool() map[string]types.HostDevice {
     	ret := _m.Called()
    diff --git a/pkg/types/types.go b/pkg/types/types.go
    index f482c9c0..ef213dca 100644
    --- a/pkg/types/types.go
    +++ b/pkg/types/types.go
    @@ -96,12 +96,13 @@ type ResourceConfig struct {
     	// optional resource prefix that will overwrite	global prefix specified in cli params
     	ResourcePrefix string `json:"resourcePrefix,omitempty"`
     	//nolint:lll
    -	ResourceName    string                    `json:"resourceName"` // the resource name will be added with resource prefix in K8s api
    -	DeviceType      DeviceType                `json:"deviceType,omitempty"`
    -	ExcludeTopology bool                      `json:"excludeTopology,omitempty"`
    -	Selectors       *json.RawMessage          `json:"selectors,omitempty"`
    -	AdditionalInfo  map[string]AdditionalInfo `json:"additionalInfo,omitempty"`
    -	SelectorObjs    []interface{}
    +	ResourceName     string                    `json:"resourceName"` // the resource name will be added with resource prefix in K8s api
    +	DeviceType       DeviceType                `json:"deviceType,omitempty"`
    +	ExcludeTopology  bool                      `json:"excludeTopology,omitempty"`
    +	Selectors        *json.RawMessage          `json:"selectors,omitempty"`
    +	AdditionalInfo   map[string]AdditionalInfo `json:"additionalInfo,omitempty"`
    +	AllocationPolicy string                    `json:"allocationPolicy,omitempty"`
    +	SelectorObjs     []interface{}
     }
    
     // DeviceSelectors contains common device selectors fields
    @@ -194,6 +195,7 @@ type ResourcePool interface {
     	StoreDeviceInfoFile(resourceNamePrefix string) error
     	CleanDeviceInfoFile(resourceNamePrefix string) error
     	GetDevicePool() map[string]HostDevice // for Allocate
    +	GetAllocationPolicy() string
     }
    
     // DeviceProvider provides interface for device discovery
    wattmto committed Jul 25, 2023 (commit 2e85e69)
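    With this commit a pool opts into a policy simply by setting the new "allocationPolicy" key in its resource config. A minimal decoding sketch (a trimmed stand-in for types.ResourceConfig that reuses the JSON tags from the diff above):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // resourceConfig is a cut-down stand-in for types.ResourceConfig; only the
    // fields relevant to this commit are shown, with the same JSON tags.
    type resourceConfig struct {
        ResourceName     string `json:"resourceName"`
        AllocationPolicy string `json:"allocationPolicy,omitempty"`
    }

    func main() {
        raw := []byte(`{"resourceName": "sriov_net_A", "allocationPolicy": "packed"}`)
        var rc resourceConfig
        if err := json.Unmarshal(raw, &rc); err != nil {
            panic(err)
        }
        // An omitted or empty "allocationPolicy" leaves the field "", which the
        // later commits treat as "preferred allocation disabled" for that pool.
        fmt.Printf("resource=%s policy=%q\n", rc.ResourceName, rc.AllocationPolicy)
    }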
  3. Add Allocator to ResourceServer

    diff --git a/pkg/factory/factory.go b/pkg/factory/factory.go
    index 7c2743a3..6ca6c468 100644
    --- a/pkg/factory/factory.go
    +++ b/pkg/factory/factory.go
    @@ -56,7 +56,12 @@ func (rf *resourceFactory) GetResourceServer(rp types.ResourcePool) (types.Resou
     		if prefixOverride := rp.GetResourcePrefix(); prefixOverride != "" {
     			prefix = prefixOverride
     		}
    -		return resources.NewResourceServer(prefix, rf.endPointSuffix, rf.pluginWatch, rp), nil
    +		policy := rp.GetAllocationPolicy()
    +		allocator, err := rf.GetAllocator(policy)
    +		if err != nil {
    +			return nil, err
    +		}
    +		return resources.NewResourceServer(prefix, rf.endPointSuffix, rf.pluginWatch, rp, allocator), nil
     	}
     	return nil, fmt.Errorf("factory: unable to get resource pool object")
     }
    @@ -221,3 +226,15 @@ func (rf *resourceFactory) GetDeviceFilter(rc *types.ResourceConfig) ([]interfac
     func (rf *resourceFactory) GetNadUtils() types.NadUtils {
     	return netdevice.NewNadUtils()
     }
    +
    +// GetAllocator returns an instance of Allocator using preferredAllocationPolicy
    +func (rf *resourceFactory) GetAllocator(policy string) (types.Allocator, error) {
    +	switch policy {
    +	case "":
    +		return nil, nil
    +	case "packed":
    +		return resources.NewPackedAllocator(), nil
    +	default:
    +		return nil, fmt.Errorf("GetAllocator(): invalid policy %s", policy)
    +	}
    +}
    diff --git a/pkg/factory/factory_test.go b/pkg/factory/factory_test.go
    index bd1589af..967d5882 100644
    --- a/pkg/factory/factory_test.go
    +++ b/pkg/factory/factory_test.go
    @@ -624,7 +624,8 @@ var _ = Describe("Factory", func() {
     			f := factory.NewResourceFactory("fake", "fake", true)
     			rp := mocks.ResourcePool{}
     			rp.On("GetResourcePrefix").Return("overridden").
    -				On("GetResourceName").Return("fake")
    +				On("GetResourceName").Return("fake").
    +				On("GetAllocationPolicy").Return("")
     			rs, e := f.GetResourceServer(&rp)
     			It("should not fail", func() {
     				Expect(e).NotTo(HaveOccurred())
    @@ -632,4 +633,26 @@ var _ = Describe("Factory", func() {
     			})
     		})
     	})
    +	DescribeTable("getting allocator",
    +		func(policy string, shouldSucceed bool, expected reflect.Type) {
    +			f := factory.NewResourceFactory("fake", "fake", true)
    +			allocator, error := f.GetAllocator(policy)
    +
    +			if shouldSucceed {
    +				Expect(error).NotTo(HaveOccurred())
    +			} else {
    +				Expect(allocator).To(BeNil())
    +			}
    +
    +			// if statement below because gomega refuses to do "nil == nil" assertions
    +			if expected != nil {
    +				Expect(reflect.TypeOf(allocator)).To(Equal(expected))
    +			} else {
    +				Expect(reflect.TypeOf(allocator)).To(BeNil())
    +			}
    +		},
    +		Entry("packed", "packed", true, reflect.TypeOf(resources.NewPackedAllocator())),
    +		Entry("empty", "", true, reflect.TypeOf(nil)),
    +		Entry("invalid value", "invalid policy", false, reflect.TypeOf(nil)),
    +	)
     })
    diff --git a/pkg/resources/server.go b/pkg/resources/server.go
    index 549fa467..4649f3aa 100644
    --- a/pkg/resources/server.go
    +++ b/pkg/resources/server.go
    @@ -43,6 +43,7 @@ type resourceServer struct {
     	updateSignal       chan bool
     	stopWatcher        chan bool
     	checkIntervals     int // health check intervals in seconds
    +	allocator          types.Allocator
     }
    
     const (
    @@ -51,7 +52,7 @@ const (
     )
    
     // NewResourceServer returns an instance of ResourceServer
    -func NewResourceServer(prefix, suffix string, pluginWatch bool, rp types.ResourcePool) types.ResourceServer {
    +func NewResourceServer(prefix, suffix string, pluginWatch bool, rp types.ResourcePool, allocator types.Allocator) types.ResourceServer {
     	sockName := fmt.Sprintf("%s_%s.%s", prefix, rp.GetResourceName(), suffix)
     	sockPath := filepath.Join(types.SockDir, sockName)
     	if !pluginWatch {
    @@ -68,6 +69,7 @@ func NewResourceServer(prefix, suffix string, pluginWatch bool, rp types.Resourc
     		updateSignal:       make(chan bool),
     		stopWatcher:        make(chan bool),
     		checkIntervals:     20, // updates every 20 seconds
    +		allocator:          allocator,
     	}
     }
    
    @@ -180,10 +182,22 @@ func (rs *resourceServer) ListAndWatch(empty *pluginapi.Empty, stream pluginapi.
     	}
     }
    
    -// TODO: (SchSeba) check if we want to use this function
     func (rs *resourceServer) GetPreferredAllocation(ctx context.Context,
    -	request *pluginapi.PreferredAllocationRequest) (*pluginapi.PreferredAllocationResponse, error) {
    -	return &pluginapi.PreferredAllocationResponse{}, nil
    +	rqt *pluginapi.PreferredAllocationRequest) (*pluginapi.PreferredAllocationResponse, error) {
    +	glog.Infof("GetPreferredAllocation called with %+v", rqt)
    +
    +	if rs.allocator == nil {
    +		glog.Errorf("allocator is nil\n")
    +		return nil, fmt.Errorf("allocator is nil")
    +	}
    +	resp := new(pluginapi.PreferredAllocationResponse)
    +	for _, containerReq := range rqt.ContainerRequests {
    +		containerResp := new(pluginapi.ContainerPreferredAllocationResponse)
    +		containerResp.DeviceIDs = rs.allocator.Allocate(containerReq, rs.resourcePool)
    +		resp.ContainerResponses = append(resp.ContainerResponses, containerResp)
    +	}
    +	glog.Infof("PreferredAllocationResponse send: %+v", resp)
    +	return resp, nil
     }
    
     func (rs *resourceServer) PreStartContainer(ctx context.Context,
    @@ -194,7 +208,7 @@ func (rs *resourceServer) PreStartContainer(ctx context.Context,
     func (rs *resourceServer) GetDevicePluginOptions(ctx context.Context, empty *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) {
     	return &pluginapi.DevicePluginOptions{
     		PreStartRequired:                false,
    -		GetPreferredAllocationAvailable: false,
    +		GetPreferredAllocationAvailable: rs.allocator != nil,
     	}, nil
     }
    
    diff --git a/pkg/resources/server_test.go b/pkg/resources/server_test.go
    index 52afa230..004c1275 100644
    --- a/pkg/resources/server_test.go
    +++ b/pkg/resources/server_test.go
    @@ -23,6 +23,7 @@ var _ = Describe("Server", func() {
     		Context("valid arguments are passed", func() {
     			var rs *resourceServer
     			rp := mocks.ResourcePool{}
    +			allocator := mocks.Allocator{}
     			BeforeEach(func() {
     				fs := &utils.FakeFilesystem{}
     				defer fs.Use()()
    @@ -30,17 +31,18 @@ var _ = Describe("Server", func() {
     			})
     			It("should have the properties correctly assigned when plugin watcher enabled", func() {
     				// Create ResourceServer with plugin watch mode enabled
    -				obj := NewResourceServer("fakeprefix", "fakesuffix", true, &rp)
    +				obj := NewResourceServer("fakeprefix", "fakesuffix", true, &rp, &allocator)
     				rs = obj.(*resourceServer)
     				Expect(rs.resourcePool.GetResourceName()).To(Equal("fakename"))
     				Expect(rs.resourceNamePrefix).To(Equal("fakeprefix"))
     				Expect(rs.endPoint).To(Equal("fakeprefix_fakename.fakesuffix"))
     				Expect(rs.pluginWatch).To(Equal(true))
     				Expect(rs.sockPath).To(Equal(filepath.Join(types.SockDir, "fakeprefix_fakename.fakesuffix")))
    +				Expect(rs.allocator).To(Equal(&allocator))
     			})
     			It("should have the properties correctly assigned when plugin watcher disabled", func() {
     				// Create ResourceServer with plugin watch mode disabled
    -				obj := NewResourceServer("fakeprefix", "fakesuffix", false, &rp)
    +				obj := NewResourceServer("fakeprefix", "fakesuffix", false, &rp, &allocator)
     				rs = obj.(*resourceServer)
     				Expect(rs.resourcePool.GetResourceName()).To(Equal("fakename"))
     				Expect(rs.resourceNamePrefix).To(Equal("fakeprefix"))
    @@ -48,6 +50,7 @@ var _ = Describe("Server", func() {
     				Expect(rs.pluginWatch).To(Equal(false))
     				Expect(rs.sockPath).To(Equal(filepath.Join(types.DeprecatedSockDir,
     					"fakeprefix_fakename.fakesuffix")))
    +				Expect(rs.allocator).To(Equal(&allocator))
     			})
     		})
     	})
    @@ -62,12 +65,13 @@ var _ = Describe("Server", func() {
     			rp.On("GetResourceName").Return("fakename")
     			rp.On("StoreDeviceInfoFile", "fakeprefix").Return(nil)
     			rp.On("CleanDeviceInfoFile", "fakeprefix").Return(nil)
    +			allocator := mocks.Allocator{}
    
     			// Use faked dir as socket dir
     			types.SockDir = fs.RootDir
     			types.DeprecatedSockDir = fs.RootDir
    
    -			obj := NewResourceServer("fakeprefix", "fakesuffix", shouldEnablePluginWatch, &rp)
    +			obj := NewResourceServer("fakeprefix", "fakesuffix", shouldEnablePluginWatch, &rp, &allocator)
     			rs := obj.(*resourceServer)
    
     			registrationServer := createFakeRegistrationServer(fs.RootDir,
    @@ -115,7 +119,8 @@ var _ = Describe("Server", func() {
     				defer fs.Use()()
     				rp := mocks.ResourcePool{}
     				rp.On("GetResourceName").Return("fake.com")
    -				rs := NewResourceServer("fakeprefix", "fakesuffix", true, &rp).(*resourceServer)
    +				allocator := mocks.Allocator{}
    +				rs := NewResourceServer("fakeprefix", "fakesuffix", true, &rp, &allocator).(*resourceServer)
     				err = rs.Init()
     			})
     			It("should never fail", func() {
    @@ -154,9 +159,10 @@ var _ = Describe("Server", func() {
     					On("Probe").Return(true).
     					On("StoreDeviceInfoFile", "fake").Return(nil).
     					On("CleanDeviceInfoFile", "fake").Return(nil)
    +				allocator := mocks.Allocator{}
    
     				// Create ResourceServer with plugin watch mode disabled
    -				rs := NewResourceServer("fake", "fake", false, &rp).(*resourceServer)
    +				rs := NewResourceServer("fake", "fake", false, &rp, &allocator).(*resourceServer)
    
     				registrationServer := createFakeRegistrationServer(fs.RootDir,
     					"fake_fake.com.fake", false, false)
    @@ -196,8 +202,10 @@ var _ = Describe("Server", func() {
     					On("Probe").Return(true).
     					On("StoreDeviceInfoFile", "fake").Return(nil).
     					On("CleanDeviceInfoFile", "fake").Return(nil)
    +				allocator := mocks.Allocator{}
    +
     				// Create ResourceServer with plugin watch mode enabled
    -				rs := NewResourceServer("fake", "fake", true, &rp).(*resourceServer)
    +				rs := NewResourceServer("fake", "fake", true, &rp, &allocator).(*resourceServer)
    
     				registrationServer := createFakeRegistrationServer(fs.RootDir,
     					"fake_fake.com.fake", false, true)
    @@ -231,9 +239,10 @@ var _ = Describe("Server", func() {
     					On("Probe").Return(true).
     					On("StoreDeviceInfoFile", "fake").Return(nil).
     					On("CleanDeviceInfoFile", "fake").Return(nil)
    +				allocator := mocks.Allocator{}
    
     				// Create ResourceServer with plugin watch mode disabled
    -				rs := NewResourceServer("fake", "fake", false, &rp).(*resourceServer)
    +				rs := NewResourceServer("fake", "fake", false, &rp, &allocator).(*resourceServer)
    
     				registrationServer := createFakeRegistrationServer(fs.RootDir,
     					"fake_fake.com.fake", false, false)
    @@ -274,8 +283,9 @@ var _ = Describe("Server", func() {
     				Return(map[string]string{"PCIDEVICE_FAKE_COM_FAKE_INFO": "{\"00:00.01\":{\"netdevice\":{\"pci\":\"00:00.01\"}}}"}, nil).
     				On("GetMounts", []string{"00:00.01"}).
     				Return([]*pluginapi.Mount{{ContainerPath: "/dev/fake", HostPath: "/dev/fake", ReadOnly: false}})
    +			allocator := mocks.Allocator{}
    
    -			rs := NewResourceServer("fake.com", "fake", true, &rp).(*resourceServer)
    +			rs := NewResourceServer("fake.com", "fake", true, &rp, &allocator).(*resourceServer)
    
     			resp, err := rs.Allocate(context.TODO(), req)
    
    @@ -327,8 +337,9 @@ var _ = Describe("Server", func() {
     				rp := mocks.ResourcePool{}
     				rp.On("GetResourceName").Return("fake.com").
     					On("GetDevices").Return(map[string]*pluginapi.Device{"00:00.01": {ID: "00:00.01", Health: "Healthy"}}).Once()
    +				allocator := mocks.Allocator{}
    
    -				rs := NewResourceServer("fake.com", "fake", true, &rp).(*resourceServer)
    +				rs := NewResourceServer("fake.com", "fake", true, &rp, &allocator).(*resourceServer)
     				rs.sockPath = fs.RootDir
    
     				lwSrv := &fakeListAndWatchServer{
    @@ -348,8 +359,9 @@ var _ = Describe("Server", func() {
     				rp.On("GetResourceName").Return("fake.com").
     					On("GetDevices").Return(map[string]*pluginapi.Device{"00:00.01": {ID: "00:00.01", Health: "Healthy"}}).Once().
     					On("GetDevices").Return(map[string]*pluginapi.Device{"00:00.02": {ID: "00:00.02", Health: "Healthy"}}).Once()
    +				allocator := mocks.Allocator{}
    
    -				rs := NewResourceServer("fake.com", "fake", true, &rp).(*resourceServer)
    +				rs := NewResourceServer("fake.com", "fake", true, &rp, &allocator).(*resourceServer)
     				rs.sockPath = fs.RootDir
    
     				lwSrv := &fakeListAndWatchServer{
    @@ -383,8 +395,9 @@ var _ = Describe("Server", func() {
     				rp.On("GetResourceName").Return("fake.com").
     					On("GetDevices").Return(map[string]*pluginapi.Device{"00:00.01": {ID: "00:00.01", Health: "Healthy"}}).Once().
     					On("GetDevices").Return(map[string]*pluginapi.Device{"00:00.02": {ID: "00:00.02", Health: "Healthy"}}).Once()
    +				allocator := mocks.Allocator{}
    
    -				rs := NewResourceServer("fake.com", "fake", true, &rp).(*resourceServer)
    +				rs := NewResourceServer("fake.com", "fake", true, &rp, &allocator).(*resourceServer)
     				rs.sockPath = fs.RootDir
    
     				lwSrv := &fakeListAndWatchServer{
    @@ -414,4 +427,62 @@ var _ = Describe("Server", func() {
     			}, 30.0)
     		})
     	})
    +
    +	DescribeTable("GetPreferredAllocation",
    +		func(req *pluginapi.ContainerPreferredAllocationRequest, expectedRespLength int, shouldFail bool) {
    +			rp := mocks.ResourcePool{}
    +			rp.On("GetDevicePool").
    +				Return(map[string]types.PciDevice{
    +					"0000:1b:00.1": nil,
    +					"0000:bc:00.0": nil,
    +					"0000:1c:00.0": nil,
    +					"0000:df:01.0": nil,
    +					"0000:af:00.1": nil,
    +					"0000:af:01.1": nil,
    +					"0000:af:01.2": nil,
    +				}).
    +				On("GetResourceName").Return("fake.com")
    +
    +			allocator := mocks.Allocator{}
    +			allocator.On("Allocate", req, &rp).Return([]string{"0000:1b:00.1", "0000:1c:00.0", "0000:af:01.1", "0000:af:01.2"})
    +
    +			rs := NewResourceServer("fake.com", "fake", true, &rp, &allocator).(*resourceServer)
    +
    +			resp, err := rs.GetPreferredAllocation(context.TODO(), &pluginapi.PreferredAllocationRequest{
    +				ContainerRequests: []*pluginapi.ContainerPreferredAllocationRequest{req}})
    +
    +			Expect(len(resp.GetContainerResponses())).To(Equal(expectedRespLength))
    +
    +			if shouldFail {
    +				Expect(err).To(HaveOccurred())
    +			} else {
    +				Expect(err).NotTo(HaveOccurred())
    +			}
    +		},
    +		Entry("prefer to allocate 4 deviceID in order",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs: []string{
    +					"0000:1b:00.1",
    +					"0000:bc:00.0",
    +					"0000:1c:00.0",
    +					"0000:df:01.0",
    +					"0000:af:01.1",
    +					"0000:af:01.2"},
    +				MustIncludeDeviceIDs: []string{},
    +				AllocationSize:       int32(4),
    +			},
    +			1,
    +			false,
    +		),
    +		Entry("allocating deviceID that does not exist",
    +			&pluginapi.ContainerPreferredAllocationRequest{
    +				AvailableDeviceIDs:   []string{"00:00.02"},
    +				MustIncludeDeviceIDs: []string{},
    +				AllocationSize:       int32(1),
    +			},
    +			1,
    +			false,
    +		),
    +		Entry("empty PreferredAllocationRequest", &pluginapi.ContainerPreferredAllocationRequest{}, 1, false),
    +	)
     })
    diff --git a/pkg/types/mocks/ResourceFactory.go b/pkg/types/mocks/ResourceFactory.go
    index 54f171fb..a93903d3 100644
    --- a/pkg/types/mocks/ResourceFactory.go
    +++ b/pkg/types/mocks/ResourceFactory.go
    @@ -1,4 +1,4 @@
    -// Code generated by mockery v2.26.1. DO NOT EDIT.
    +// Code generated by mockery v2.15.0. DO NOT EDIT.
    
     package mocks
    
    @@ -12,6 +12,32 @@ type ResourceFactory struct {
     	mock.Mock
     }
    
    +// GetAllocator provides a mock function with given fields: _a0
    +func (_m *ResourceFactory) GetAllocator(_a0 string) (types.Allocator, error) {
    +	ret := _m.Called(_a0)
    +
    +	var r0 types.Allocator
    +	var r1 error
    +	if rf, ok := ret.Get(0).(func(string) (types.Allocator, error)); ok {
    +		return rf(_a0)
    +	}
    +	if rf, ok := ret.Get(0).(func(string) types.Allocator); ok {
    +		r0 = rf(_a0)
    +	} else {
    +		if ret.Get(0) != nil {
    +			r0 = ret.Get(0).(types.Allocator)
    +		}
    +	}
    +
    +	if rf, ok := ret.Get(1).(func(string) error); ok {
    +		r1 = rf(_a0)
    +	} else {
    +		r1 = ret.Error(1)
    +	}
    +
    +	return r0, r1
    +}
    +
     // GetDefaultInfoProvider provides a mock function with given fields: _a0, _a1
     func (_m *ResourceFactory) GetDefaultInfoProvider(_a0 string, _a1 string) []types.DeviceInfoProvider {
     	ret := _m.Called(_a0, _a1)
    diff --git a/pkg/types/mocks/ResourceServer.go b/pkg/types/mocks/ResourceServer.go
    index 43a2bea2..891345ac 100644
    --- a/pkg/types/mocks/ResourceServer.go
    +++ b/pkg/types/mocks/ResourceServer.go
    @@ -1,4 +1,4 @@
    -// Code generated by mockery v2.26.1. DO NOT EDIT.
    +// Code generated by mockery v2.15.0. DO NOT EDIT.
    
     package mocks
    
    diff --git a/pkg/types/types.go b/pkg/types/types.go
    index ef213dca..8305f9c2 100644
    --- a/pkg/types/types.go
    +++ b/pkg/types/types.go
    @@ -180,6 +180,7 @@ type ResourceFactory interface {
     	GetDeviceProvider(DeviceType) DeviceProvider
     	GetDeviceFilter(*ResourceConfig) ([]interface{}, error)
     	GetNadUtils() NadUtils
    +	GetAllocator(string) (Allocator, error)
     }
    
     // ResourcePool represents a generic resource entity
    wattmto committed Jul 25, 2023 (commit cb73c03)
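    The new handler fans the kubelet request out per container: each ContainerPreferredAllocationRequest is handed to the configured Allocator, and the returned device IDs become that container's response, while GetDevicePluginOptions now advertises the capability only when an allocator is set. A standalone sketch of that per-container shape (the callback stands in for the Allocator interface; not the server's actual code):

    package main

    import (
        "fmt"

        pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
    )

    // buildPreferredAllocation runs one allocation per container request and
    // collects the results, mirroring the loop in GetPreferredAllocation above.
    func buildPreferredAllocation(
        rqt *pluginapi.PreferredAllocationRequest,
        allocate func(*pluginapi.ContainerPreferredAllocationRequest) []string,
    ) *pluginapi.PreferredAllocationResponse {
        resp := &pluginapi.PreferredAllocationResponse{}
        for _, cr := range rqt.ContainerRequests {
            resp.ContainerResponses = append(resp.ContainerResponses,
                &pluginapi.ContainerPreferredAllocationResponse{DeviceIDs: allocate(cr)})
        }
        return resp
    }

    func main() {
        req := &pluginapi.PreferredAllocationRequest{
            ContainerRequests: []*pluginapi.ContainerPreferredAllocationRequest{{
                AvailableDeviceIDs:   []string{"0000:af:00.1", "0000:af:00.2"},
                MustIncludeDeviceIDs: []string{},
                AllocationSize:       1,
            }},
        }
        // Trivial stand-in allocator: take the first AllocationSize available IDs.
        resp := buildPreferredAllocation(req, func(cr *pluginapi.ContainerPreferredAllocationRequest) []string {
            return cr.AvailableDeviceIDs[:cr.AllocationSize]
        })
        fmt.Println(resp.ContainerResponses[0].DeviceIDs) // [0000:af:00.1]
    }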
  4. Update readme with PreferredAllocation

    diff --git a/README.md b/README.md
    index 4b4d841d..ab7fada5 100644
    --- a/README.md
    +++ b/README.md
    @@ -51,6 +51,7 @@ which are available on a Kubernetes host
     - Supports devices with both Kernel and userspace (UIO and VFIO) drivers
     - Allows resource grouping using "selector(s)"
     - User configurable resourceName
    +- User configurable policy for preferred device allocation
     - Detects Kubelet restarts and auto-re-register
     - Detects Link status (for Linux network devices) and updates associated VFs health accordingly
     - Extensible to support new device types with minimal effort if not already supported
    @@ -269,17 +270,27 @@ This plugin creates device plugin endpoints based on the configurations given in
    
     `"resourceList"` should contain a list of config objects. Each config object may consist of following fields:
    
    -|       Field       | Required |                                                              Description                                                               |                     Type/Defaults                     |                         Example/Accepted values                        |
    -|-------------------|----------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|------------------------------------------------------------------------|
    -| "resourceName"    | Y        | Endpoint resource name. Should not contain special characters including hyphens and must be unique in the scope of the resource prefix | string                                                | "sriov_net_A"                                                          |
    -| "resourcePrefix"  | N        | Endpoint resource prefix name override. Should not contain special characters                                                          | string Default : "intel.com"                          | "yourcompany.com"                                                      |
    -| "deviceType"      | N        | Device Type for a resource pool.                                                                                                       | string value of supported types. Default: "netDevice" | Currently supported values: "accelerator", "netDevice", "auxNetDevice" |
    -| "excludeTopology" | N        | Exclude advertising of device's NUMA topology                                                                                          | bool Default: "false"                                 | "excludeTopology": true                                                |
    -| "selectors"       | N        | Either a single device selector map or a list of maps. The list syntax is preferred. The "deviceType" value determines the device selector options.                                                  | json list of objects or json object. Default: null                   | Example: "selectors": [{"vendors": ["8086"],"devices": ["154c"]}]        |
    -| "additionalInfo" | N | A map of map to add additional information to the pod via environment variables to devices                                             | json object as string Default: null  | Example: "additionalInfo": {"*": {"token": "3e49019f-412f-4f02-824e-4cd195944205"}} |
    +| Field              | Required |                                                              Description                                                               |                     Type/Defaults                     |                         Example/Accepted values                        |
    +|--------------------|----------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|------------------------------------------------------------------------|
    +| "resourceName"     | Y        | Endpoint resource name. Should not contain special characters including hyphens and must be unique in the scope of the resource prefix | string                                                | "sriov_net_A"                                                          |
    +| "resourcePrefix"   | N        | Endpoint resource prefix name override. Should not contain special characters                                                          | string Default : "intel.com"                          | "yourcompany.com"                                                      |
    +| "deviceType"       | N        | Device Type for a resource pool.                                                                                                       | string value of supported types. Default: "netDevice" | Currently supported values: "accelerator", "netDevice", "auxNetDevice" |
    +| "allocationPolicy" | N        | Preferred device allocation policy for a resource pool                                                                                 | string value of supported allocation policy. Default: "" | Currently supported values: "", "packed"                                            |
    +| "excludeTopology"  | N        | Exclude advertising of device's NUMA topology                                                                                          | bool Default: "false"                                 | "excludeTopology": true                                                |
    +| "selectors"        | N        | Either a single device selector map or a list of maps. The list syntax is preferred. The "deviceType" value determines the device selector options.                                                  | json list of objects or json object. Default: null                   | Example: "selectors": [{"vendors": ["8086"],"devices": ["154c"]}]        |
    +| "additionalInfo"   | N        | A map of map to add additional information to the pod via environment variables to devices                                             | json object as string Default: null  | Example: "additionalInfo": {"*": {"token": "3e49019f-412f-4f02-824e-4cd195944205"}} |
    
     Note: "resourceName" must be unique only in the scope of a given prefix, including the one specified globally in the CLI params, e.g. "example.com/10G", "acme.com/10G" and "acme.com/40G" are perfectly valid names.
    
    +#### Allocation Policy
    +
    +The "allocationPolicy" value determines which device in a resource pool is allocated. Each policy acts as following:
    +
    +| Policy   | Description                                       |
    +|----------|---------------------------------------------------|
    +| ""       | Disable preferred device allocation functionality |
    +| "packed" | Try to allocate VFs from same PF                  |
    +
     #### Device selectors
    
     The "selectors" field accepts both a single object and a list of selector objects. While both formats are supported, the list syntax is preferred. When using the list syntax, each selector object is evaluated in the order present in the list. For example, a single object would look like:
    wattmto committed Jul 25, 2023 (commit dc4750d)
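    For reference, a hypothetical resourceList entry that opts a pool into the packed policy (values are illustrative; field names as in the table above):

    {
        "resourceList": [
            {
                "resourceName": "sriov_net_A",
                "allocationPolicy": "packed",
                "selectors": [{"vendors": ["8086"], "devices": ["154c"]}]
            }
        ]
    }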