diff --git a/api/grpc/nvidia/ipam/node/v1/node.pb.go b/api/grpc/nvidia/ipam/node/v1/node.pb.go index e0ab65b..e355b89 100644 --- a/api/grpc/nvidia/ipam/node/v1/node.pb.go +++ b/api/grpc/nvidia/ipam/node/v1/node.pb.go @@ -31,6 +31,56 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// indicates type of the pool which is referred by the name +type PoolType int32 + +const ( + PoolType_POOL_TYPE_UNSPECIFIED PoolType = 0 + PoolType_POOL_TYPE_IPPOOL PoolType = 1 + PoolType_POOL_TYPE_CIDRPOOL PoolType = 2 +) + +// Enum value maps for PoolType. +var ( + PoolType_name = map[int32]string{ + 0: "POOL_TYPE_UNSPECIFIED", + 1: "POOL_TYPE_IPPOOL", + 2: "POOL_TYPE_CIDRPOOL", + } + PoolType_value = map[string]int32{ + "POOL_TYPE_UNSPECIFIED": 0, + "POOL_TYPE_IPPOOL": 1, + "POOL_TYPE_CIDRPOOL": 2, + } +) + +func (x PoolType) Enum() *PoolType { + p := new(PoolType) + *p = x + return p +} + +func (x PoolType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PoolType) Descriptor() protoreflect.EnumDescriptor { + return file_nvidia_ipam_node_v1_node_proto_enumTypes[0].Descriptor() +} + +func (PoolType) Type() protoreflect.EnumType { + return &file_nvidia_ipam_node_v1_node_proto_enumTypes[0] +} + +func (x PoolType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PoolType.Descriptor instead. 
+func (PoolType) EnumDescriptor() ([]byte, []int) { + return file_nvidia_ipam_node_v1_node_proto_rawDescGZIP(), []int{0} +} + // AllocateRequest contains parameters for Allocate rpc call type AllocateRequest struct { state protoimpl.MessageState @@ -95,6 +145,8 @@ type IPAMParameters struct { CniIfname string `protobuf:"bytes,3,opt,name=cni_ifname,json=cniIfname,proto3" json:"cni_ifname,omitempty"` // required, additional metadata to identify IP allocation Metadata *IPAMMetadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + // type of the pool which is referred by the name in the pools field + PoolType PoolType `protobuf:"varint,5,opt,name=pool_type,json=poolType,proto3,enum=nvidia.ipam.node.v1.PoolType" json:"pool_type,omitempty"` } func (x *IPAMParameters) Reset() { @@ -157,6 +209,13 @@ func (x *IPAMParameters) GetMetadata() *IPAMMetadata { return nil } +func (x *IPAMParameters) GetPoolType() PoolType { + if x != nil { + return x.PoolType + } + return PoolType_POOL_TYPE_UNSPECIFIED +} + // IPAMMetadata contains metadata for IPAM calls type IPAMMetadata struct { state protoimpl.MessageState @@ -392,6 +451,8 @@ type AllocationInfo struct { Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // gateway for allocated IP Gateway string `protobuf:"bytes,3,opt,name=gateway,proto3" json:"gateway,omitempty"` + // type of the pool which is referred by the name in the pools field + PoolType PoolType `protobuf:"varint,4,opt,name=pool_type,json=poolType,proto3,enum=nvidia.ipam.node.v1.PoolType" json:"pool_type,omitempty"` } func (x *AllocationInfo) Reset() { @@ -447,6 +508,13 @@ func (x *AllocationInfo) GetGateway() string { return "" } +func (x *AllocationInfo) GetPoolType() PoolType { + if x != nil { + return x.PoolType + } + return PoolType_POOL_TYPE_UNSPECIFIED +} + // IsAllocatedReply contains reply for IsAllocated rpc call type IsAllocatedResponse struct { state protoimpl.MessageState @@ -536,7 +604,7 @@ var
file_nvidia_ipam_node_v1_node_proto_rawDesc = []byte{ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0xad, 0x01, + 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0xe9, 0x01, 0x0a, 0x0e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6e, 0x69, 0x5f, 0x63, 0x6f, @@ -547,61 +615,74 @@ var file_nvidia_ipam_node_v1_node_proto_rawDesc = []byte{ 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x99, 0x01, - 0x0a, 0x0c, 0x49, 0x50, 0x41, 0x4d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x20, - 0x0a, 0x0c, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6b, 0x38, 0x73, - 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0b, - 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 
0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x55, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x22, 0x59, 0x0a, 0x12, 0x49, 0x73, 0x41, - 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x43, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x22, 0x58, 0x0a, 0x11, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0x59, - 0x0a, 0x10, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, - 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x6c, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4e, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 
0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, - 0x6f, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x49, 0x73, 0x41, - 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xad, 0x02, 0x0a, 0x0b, 0x49, 0x50, 0x41, 0x4d, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x65, 0x12, 0x24, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, - 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, - 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, - 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x62, 0x0a, 0x0b, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x27, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x6e, 0x76, 0x69, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, + 0x09, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1d, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 
0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x08, 0x70, 0x6f, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x22, 0x99, 0x01, 0x0a, 0x0c, 0x49, 0x50, + 0x41, 0x4d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x20, 0x0a, 0x0c, 0x6b, 0x38, + 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0b, 0x6b, 0x38, 0x73, 0x5f, + 0x70, 0x6f, 0x64, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6b, + 0x38, 0x73, 0x50, 0x6f, 0x64, 0x55, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x65, 0x76, 0x69, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x76, + 0x69, 0x63, 0x65, 0x49, 0x64, 0x22, 0x59, 0x0a, 0x12, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x58, 0x0a, 0x11, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, + 0x69, 0x61, 0x2e, 0x69, 0x70, 
0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x50, 0x41, 0x4d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0x59, 0x0a, 0x10, 0x41, 0x6c, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, + 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x8a, 0x01, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x6f, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x18, 0x0a, 0x07, + 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x6e, 0x76, 0x69, 0x64, + 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6f, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x70, 0x6f, 0x6f, 0x6c, 0x54, 0x79, + 0x70, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x61, + 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, + 0x53, 0x0a, 0x08, 0x50, 0x6f, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, 
0x12, 0x19, 0x0a, 0x15, 0x50, + 0x4f, 0x4f, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x4f, 0x4f, 0x4c, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x50, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x4f, 0x4f, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x49, 0x44, 0x52, 0x50, 0x4f, + 0x4f, 0x4c, 0x10, 0x02, 0x32, 0xad, 0x02, 0x0a, 0x0b, 0x49, 0x50, 0x41, 0x4d, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x24, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, + 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x62, 0x0a, 0x0b, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x27, + 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, + 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x73, + 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0a, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x65, 0x12, 0x26, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0a, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x6e, 0x76, 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, - 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6e, 0x76, - 0x69, 0x64, 0x69, 0x61, 0x2e, 0x69, 0x70, 0x61, 0x6d, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x44, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -616,35 +697,39 @@ func file_nvidia_ipam_node_v1_node_proto_rawDescGZIP() []byte { return file_nvidia_ipam_node_v1_node_proto_rawDescData } +var file_nvidia_ipam_node_v1_node_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_nvidia_ipam_node_v1_node_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_nvidia_ipam_node_v1_node_proto_goTypes = []interface{}{ - (*AllocateRequest)(nil), // 0: nvidia.ipam.node.v1.AllocateRequest - (*IPAMParameters)(nil), // 1: nvidia.ipam.node.v1.IPAMParameters - (*IPAMMetadata)(nil), // 2: nvidia.ipam.node.v1.IPAMMetadata - (*IsAllocatedRequest)(nil), // 3: nvidia.ipam.node.v1.IsAllocatedRequest - (*DeallocateRequest)(nil), // 4: nvidia.ipam.node.v1.DeallocateRequest - (*AllocateResponse)(nil), // 5: nvidia.ipam.node.v1.AllocateResponse - (*AllocationInfo)(nil), // 6: nvidia.ipam.node.v1.AllocationInfo - 
(*IsAllocatedResponse)(nil), // 7: nvidia.ipam.node.v1.IsAllocatedResponse - (*DeallocateResponse)(nil), // 8: nvidia.ipam.node.v1.DeallocateResponse + (PoolType)(0), // 0: nvidia.ipam.node.v1.PoolType + (*AllocateRequest)(nil), // 1: nvidia.ipam.node.v1.AllocateRequest + (*IPAMParameters)(nil), // 2: nvidia.ipam.node.v1.IPAMParameters + (*IPAMMetadata)(nil), // 3: nvidia.ipam.node.v1.IPAMMetadata + (*IsAllocatedRequest)(nil), // 4: nvidia.ipam.node.v1.IsAllocatedRequest + (*DeallocateRequest)(nil), // 5: nvidia.ipam.node.v1.DeallocateRequest + (*AllocateResponse)(nil), // 6: nvidia.ipam.node.v1.AllocateResponse + (*AllocationInfo)(nil), // 7: nvidia.ipam.node.v1.AllocationInfo + (*IsAllocatedResponse)(nil), // 8: nvidia.ipam.node.v1.IsAllocatedResponse + (*DeallocateResponse)(nil), // 9: nvidia.ipam.node.v1.DeallocateResponse } var file_nvidia_ipam_node_v1_node_proto_depIdxs = []int32{ - 1, // 0: nvidia.ipam.node.v1.AllocateRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters - 2, // 1: nvidia.ipam.node.v1.IPAMParameters.metadata:type_name -> nvidia.ipam.node.v1.IPAMMetadata - 1, // 2: nvidia.ipam.node.v1.IsAllocatedRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters - 1, // 3: nvidia.ipam.node.v1.DeallocateRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters - 6, // 4: nvidia.ipam.node.v1.AllocateResponse.allocations:type_name -> nvidia.ipam.node.v1.AllocationInfo - 0, // 5: nvidia.ipam.node.v1.IPAMService.Allocate:input_type -> nvidia.ipam.node.v1.AllocateRequest - 3, // 6: nvidia.ipam.node.v1.IPAMService.IsAllocated:input_type -> nvidia.ipam.node.v1.IsAllocatedRequest - 4, // 7: nvidia.ipam.node.v1.IPAMService.Deallocate:input_type -> nvidia.ipam.node.v1.DeallocateRequest - 5, // 8: nvidia.ipam.node.v1.IPAMService.Allocate:output_type -> nvidia.ipam.node.v1.AllocateResponse - 7, // 9: nvidia.ipam.node.v1.IPAMService.IsAllocated:output_type -> nvidia.ipam.node.v1.IsAllocatedResponse - 8, // 10: 
nvidia.ipam.node.v1.IPAMService.Deallocate:output_type -> nvidia.ipam.node.v1.DeallocateResponse - 8, // [8:11] is the sub-list for method output_type - 5, // [5:8] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 2, // 0: nvidia.ipam.node.v1.AllocateRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters + 3, // 1: nvidia.ipam.node.v1.IPAMParameters.metadata:type_name -> nvidia.ipam.node.v1.IPAMMetadata + 0, // 2: nvidia.ipam.node.v1.IPAMParameters.pool_type:type_name -> nvidia.ipam.node.v1.PoolType + 2, // 3: nvidia.ipam.node.v1.IsAllocatedRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters + 2, // 4: nvidia.ipam.node.v1.DeallocateRequest.parameters:type_name -> nvidia.ipam.node.v1.IPAMParameters + 7, // 5: nvidia.ipam.node.v1.AllocateResponse.allocations:type_name -> nvidia.ipam.node.v1.AllocationInfo + 0, // 6: nvidia.ipam.node.v1.AllocationInfo.pool_type:type_name -> nvidia.ipam.node.v1.PoolType + 1, // 7: nvidia.ipam.node.v1.IPAMService.Allocate:input_type -> nvidia.ipam.node.v1.AllocateRequest + 4, // 8: nvidia.ipam.node.v1.IPAMService.IsAllocated:input_type -> nvidia.ipam.node.v1.IsAllocatedRequest + 5, // 9: nvidia.ipam.node.v1.IPAMService.Deallocate:input_type -> nvidia.ipam.node.v1.DeallocateRequest + 6, // 10: nvidia.ipam.node.v1.IPAMService.Allocate:output_type -> nvidia.ipam.node.v1.AllocateResponse + 8, // 11: nvidia.ipam.node.v1.IPAMService.IsAllocated:output_type -> nvidia.ipam.node.v1.IsAllocatedResponse + 9, // 12: nvidia.ipam.node.v1.IPAMService.Deallocate:output_type -> nvidia.ipam.node.v1.DeallocateResponse + 10, // [10:13] is the sub-list for method output_type + 7, // [7:10] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } 
func init() { file_nvidia_ipam_node_v1_node_proto_init() } @@ -767,13 +852,14 @@ func file_nvidia_ipam_node_v1_node_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_nvidia_ipam_node_v1_node_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 9, NumExtensions: 0, NumServices: 1, }, GoTypes: file_nvidia_ipam_node_v1_node_proto_goTypes, DependencyIndexes: file_nvidia_ipam_node_v1_node_proto_depIdxs, + EnumInfos: file_nvidia_ipam_node_v1_node_proto_enumTypes, MessageInfos: file_nvidia_ipam_node_v1_node_proto_msgTypes, }.Build() File_nvidia_ipam_node_v1_node_proto = out.File diff --git a/api/grpc/proto/nvidia/ipam/node/v1/node.proto b/api/grpc/proto/nvidia/ipam/node/v1/node.proto index cc5d610..f824b9f 100644 --- a/api/grpc/proto/nvidia/ipam/node/v1/node.proto +++ b/api/grpc/proto/nvidia/ipam/node/v1/node.proto @@ -13,6 +13,13 @@ syntax = "proto3"; package nvidia.ipam.node.v1; +// indicates type of the pool which is referred by the name +enum PoolType { + POOL_TYPE_UNSPECIFIED = 0; + POOL_TYPE_IPPOOL = 1; + POOL_TYPE_CIDRPOOL = 2; +} + // gRPC service definition for NVIDIA IPAM node daemon service IPAMService { // Allocate is called as a part of CMD_ADD flow. 
@@ -64,6 +71,8 @@ message IPAMParameters { string cni_ifname = 3; // required, additional metadata to identify IP allocation IPAMMetadata metadata = 4; + // type of the pool which is referred by the name in the pools field + PoolType pool_type = 5; } // IPAMMetadata contains metadata for IPAM calls @@ -104,6 +113,8 @@ message AllocationInfo { string ip = 2; // gateway for allocated IP string gateway = 3; + // type of the pool which is referred by the name in the pools field + PoolType pool_type = 4; } // IsAllocatedReply contains reply for IsAllocated rpc call diff --git a/cmd/ipam-node/app/app.go b/cmd/ipam-node/app/app.go index a61071e..bc2dfdc 100644 --- a/cmd/ipam-node/app/app.go +++ b/cmd/ipam-node/app/app.go @@ -58,6 +58,7 @@ import ( "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/cleaner" + cidrpoolctrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/cidrpool" ippoolctrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/ippool" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/grpc/middleware" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/handlers" @@ -157,6 +158,7 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio k8sClient, err := client.New(config, client.Options{Scheme: mgr.GetScheme(), Mapper: mgr.GetRESTMapper()}) if err != nil { logger.Error(err, "unable to direct k8s client") + return err } if err = (&ippoolctrl.IPPoolReconciler{ @@ -168,6 +170,15 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio PoolManager: poolManager, Client: mgr.GetClient(), Scheme: mgr.GetScheme(), NodeName: opts.NodeName, }).SetupWithManager(mgr); err != nil { logger.Error(err, "unable to create controller", "controller", "IPPool") return err } + if err = (&cidrpoolctrl.CIDRPoolReconciler{ + PoolManager: poolManager, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + NodeName: opts.NodeName, + }).SetupWithManager(mgr); err != nil { + logger.Error(err, "unable to create controller", "controller",
"CIDRPool") + return err + } if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { logger.Error(err, "unable to set up health check") diff --git a/cmd/ipam-node/app/app_test.go b/cmd/ipam-node/app/app_test.go index cab85f5..50ecadc 100644 --- a/cmd/ipam-node/app/app_test.go +++ b/cmd/ipam-node/app/app_test.go @@ -27,7 +27,6 @@ import ( "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" @@ -44,7 +43,7 @@ const ( testNamespace = "default" ) -func createTestPools() { +func createTestIPPools() { pool1 := &ipamv1alpha1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: testPoolName1, Namespace: testNamespace}, Spec: ipamv1alpha1.IPPoolSpec{ @@ -54,6 +53,15 @@ func createTestPools() { }, } ExpectWithOffset(1, k8sClient.Create(ctx, pool1)) + pool1.Status = ipamv1alpha1.IPPoolStatus{ + Allocations: []ipamv1alpha1.Allocation{ + { + NodeName: testNodeName, + StartIP: "192.168.0.2", + EndIP: "192.168.0.254", + }, + }} + ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool1)) pool2 := &ipamv1alpha1.IPPool{ ObjectMeta: metav1.ObjectMeta{Name: testPoolName2, Namespace: testNamespace}, @@ -64,47 +72,57 @@ func createTestPools() { }, } ExpectWithOffset(1, k8sClient.Create(ctx, pool2)) - - // Update statuses with range allocation - Eventually(func(g Gomega) error { - status := ipamv1alpha1.IPPoolStatus{ - Allocations: []ipamv1alpha1.Allocation{ - { - NodeName: testNodeName, - StartIP: "192.168.0.2", - EndIP: "192.168.0.254", - }, - }, - } - return updatePoolStatus(testPoolName1, status) - }, 30, 5).Should(Not(HaveOccurred())) - - Eventually(func(g Gomega) error { - status := ipamv1alpha1.IPPoolStatus{ - Allocations: []ipamv1alpha1.Allocation{ - { - NodeName: testNodeName, - StartIP: "10.100.0.2", - EndIP: "10.100.0.254", - }, + pool2.Status = ipamv1alpha1.IPPoolStatus{ + Allocations: 
[]ipamv1alpha1.Allocation{ + { + NodeName: testNodeName, + StartIP: "10.100.0.2", + EndIP: "10.100.0.254", }, - } - return updatePoolStatus(testPoolName2, status) - }, 30, 5).Should(Not(HaveOccurred())) + }} + ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool2)) } -func updatePoolStatus(poolName string, status ipamv1alpha1.IPPoolStatus) error { - pool := &ipamv1alpha1.IPPool{} - err := k8sClient.Get(ctx, types.NamespacedName{Name: poolName, Namespace: testNamespace}, pool) - if err != nil { - return err +func createTestCIDRPools() { + pool1GatewayIndex := uint(1) + pool1 := &ipamv1alpha1.CIDRPool{ + ObjectMeta: metav1.ObjectMeta{Name: testPoolName1, Namespace: testNamespace}, + Spec: ipamv1alpha1.CIDRPoolSpec{ + CIDR: "192.100.0.0/16", + GatewayIndex: &pool1GatewayIndex, + PerNodeNetworkPrefix: 24, + Exclusions: []ipamv1alpha1.ExcludeRange{ + {StartIP: "192.100.0.1", EndIP: "192.100.0.10"}, + }, + }, } - pool.Status = status - err = k8sClient.Status().Update(ctx, pool) - if err != nil { - return err + ExpectWithOffset(1, k8sClient.Create(ctx, pool1)) + pool1.Status = ipamv1alpha1.CIDRPoolStatus{ + Allocations: []ipamv1alpha1.CIDRPoolAllocation{ + { + NodeName: testNodeName, + Prefix: "192.100.0.0/24", + Gateway: "192.100.0.1", + }, + }} + ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool1)) + + pool2 := &ipamv1alpha1.CIDRPool{ + ObjectMeta: metav1.ObjectMeta{Name: testPoolName2, Namespace: testNamespace}, + Spec: ipamv1alpha1.CIDRPoolSpec{ + CIDR: "10.200.0.0/24", + PerNodeNetworkPrefix: 31, + }, } - return nil + ExpectWithOffset(1, k8sClient.Create(ctx, pool2)) + pool2.Status = ipamv1alpha1.CIDRPoolStatus{ + Allocations: []ipamv1alpha1.CIDRPoolAllocation{ + { + NodeName: testNodeName, + Prefix: "10.200.0.0/31", + }, + }} + ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool2)) } func createTestPod() *corev1.Pod { @@ -161,10 +179,13 @@ var _ = Describe("IPAM Node daemon", func() { It("Validate main flows", func() { done := make(chan interface{}) go 
func() { + defer GinkgoRecover() + defer close(done) testDir := GinkgoT().TempDir() opts := getOptions(testDir) - createTestPools() + createTestIPPools() + createTestCIDRPools() pod := createTestPod() ctx = logr.NewContext(ctx, klog.NewKlogr()) @@ -180,38 +201,41 @@ var _ = Describe("IPAM Node daemon", func() { grpcClient := nodev1.NewIPAMServiceClient(conn) - params := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace) - - // no allocation yet - _, err = grpcClient.IsAllocated(ctx, - &nodev1.IsAllocatedRequest{Parameters: params}) - Expect(status.Code(err) == codes.NotFound).To(BeTrue()) - - // allocate - resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params}) - Expect(err).NotTo(HaveOccurred()) - Expect(resp.Allocations).To(HaveLen(2)) - Expect(resp.Allocations[0].Pool).NotTo(BeEmpty()) - Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty()) - Expect(resp.Allocations[0].Ip).NotTo(BeEmpty()) - - _, err = grpcClient.IsAllocated(ctx, - &nodev1.IsAllocatedRequest{Parameters: params}) - Expect(err).NotTo(HaveOccurred()) - - // deallocate - _, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params}) - Expect(err).NotTo(HaveOccurred()) - - // deallocate should be idempotent - _, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params}) - Expect(err).NotTo(HaveOccurred()) - - // check should fail - _, err = grpcClient.IsAllocated(ctx, - &nodev1.IsAllocatedRequest{Parameters: params}) - Expect(status.Code(err) == codes.NotFound).To(BeTrue()) - close(done) + cidrPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace) + cidrPoolParams.PoolType = nodev1.PoolType_POOL_TYPE_CIDRPOOL + ipPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace) + + for _, params := range []*nodev1.IPAMParameters{ipPoolParams, cidrPoolParams} { + // no allocation yet + _, err = grpcClient.IsAllocated(ctx, + &nodev1.IsAllocatedRequest{Parameters: params}) + Expect(status.Code(err) == 
codes.NotFound).To(BeTrue()) + + // allocate + resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + Expect(resp.Allocations).To(HaveLen(2)) + Expect(resp.Allocations[0].Pool).NotTo(BeEmpty()) + Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty()) + Expect(resp.Allocations[0].Ip).NotTo(BeEmpty()) + + _, err = grpcClient.IsAllocated(ctx, + &nodev1.IsAllocatedRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + + // deallocate + _, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + + // deallocate should be idempotent + _, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params}) + Expect(err).NotTo(HaveOccurred()) + + // check should fail + _, err = grpcClient.IsAllocated(ctx, + &nodev1.IsAllocatedRequest{Parameters: params}) + Expect(status.Code(err) == codes.NotFound).To(BeTrue()) + } }() Eventually(done, 5*time.Minute).Should(BeClosed()) }) diff --git a/pkg/cni/plugin/plugin.go b/pkg/cni/plugin/plugin.go index 9aed698..fa791a4 100644 --- a/pkg/cni/plugin/plugin.go +++ b/pkg/cni/plugin/plugin.go @@ -28,6 +28,7 @@ import ( nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" "github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/version" ) @@ -197,8 +198,14 @@ func cniConfToGRPCReq(conf *types.NetConf, args *skel.CmdArgs) (*nodev1.IPAMPara if err != nil { return nil, fmt.Errorf("failed to load extra CNI args: %v", err) } + + poolType := nodev1.PoolType_POOL_TYPE_IPPOOL + if conf.IPAM.PoolType == common.PoolTypeCIDRPool { + poolType = nodev1.PoolType_POOL_TYPE_CIDRPOOL + } req := &nodev1.IPAMParameters{ Pools: conf.IPAM.Pools, + PoolType: poolType, CniIfname: args.IfName, CniContainerid: args.ContainerID, Metadata: &nodev1.IPAMMetadata{ diff --git a/pkg/cni/plugin/plugin_test.go 
b/pkg/cni/plugin/plugin_test.go index 98eefa2..fa49bf9 100644 --- a/pkg/cni/plugin/plugin_test.go +++ b/pkg/cni/plugin/plugin_test.go @@ -84,6 +84,7 @@ var _ = Describe("plugin tests", func() { mockDaemonClient.On("Allocate", mock.Anything, &nodev1.AllocateRequest{ Parameters: &nodev1.IPAMParameters{ Pools: []string{"my-pool"}, + PoolType: nodev1.PoolType_POOL_TYPE_IPPOOL, CniIfname: "net1", CniContainerid: "1234", Metadata: &nodev1.IPAMMetadata{ @@ -92,9 +93,10 @@ var _ = Describe("plugin tests", func() { }, }}).Return(&nodev1.AllocateResponse{ Allocations: []*nodev1.AllocationInfo{{ - Pool: "my-pool", - Ip: "192.168.1.10/24", - Gateway: "192.168.1.1", + Pool: "my-pool", + Ip: "192.168.1.10/24", + Gateway: "192.168.1.1", + PoolType: nodev1.PoolType_POOL_TYPE_IPPOOL, }}, }, nil) err := p.CmdAdd(args) diff --git a/pkg/cni/types/types.go b/pkg/cni/types/types.go index 5eff194..7024df0 100644 --- a/pkg/cni/types/types.go +++ b/pkg/cni/types/types.go @@ -21,6 +21,8 @@ import ( "strings" "github.com/containernetworking/cni/pkg/types" + + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" ) const ( @@ -52,6 +54,9 @@ type IPAMConf struct { // PoolName is the name of the pool to be used to allocate IP PoolName string `json:"poolName,omitempty"` + // PoolType is the type of the pool which is referred by the PoolName, + // supported values: ippool, cidrpool + PoolType string `json:"poolType,omitempty"` // Address of the NVIDIA-ipam DaemonSocket DaemonSocket string `json:"daemonSocket,omitempty"` DaemonCallTimeoutSeconds int `json:"daemonCallTimeoutSeconds,omitempty"` @@ -106,6 +111,7 @@ func (cl *confLoader) LoadConf(bytes []byte) (*NetConf, error) { defaultConf := &IPAMConf{ // use network name as pool name by default PoolName: n.Name, + PoolType: common.PoolTypeIPPool, ConfDir: DefaultConfDir, LogFile: DefaultLogFile, DaemonSocket: DefaultDaemonSocket, @@ -119,6 +125,12 @@ func (cl *confLoader) LoadConf(bytes []byte) (*NetConf, error) { return nil, err } + n.IPAM.PoolType = 
strings.ToLower(n.IPAM.PoolType) + if n.IPAM.PoolType != common.PoolTypeIPPool && n.IPAM.PoolType != common.PoolTypeCIDRPool { + return nil, fmt.Errorf("unsupported poolType %s, supported values: %s, %s", + n.IPAM.PoolType, common.PoolTypeIPPool, common.PoolTypeCIDRPool) + } + return n, nil } @@ -170,6 +182,10 @@ func (cl *confLoader) overlayConf(from, to *IPAMConf) { to.PoolName = from.PoolName } + if to.PoolType == "" { + to.PoolType = from.PoolType + } + if to.DaemonSocket == "" { to.DaemonSocket = from.DaemonSocket } diff --git a/pkg/common/consts.go b/pkg/common/consts.go index bbf38dd..b0cca6c 100644 --- a/pkg/common/consts.go +++ b/pkg/common/consts.go @@ -17,3 +17,10 @@ const ( // IPAMName is the name of the IPAM plugin IPAMName = "nvidia-k8s-ipam" ) + +const ( + // PoolTypeIPPool contains string representation for pool type of IPPool + PoolTypeIPPool = "ippool" + // PoolTypeCIDRPool contains string representation for pool type of CIDRPool + PoolTypeCIDRPool = "cidrpool" +) diff --git a/pkg/common/pool_key.go b/pkg/common/pool_key.go new file mode 100644 index 0000000..b7c7f0e --- /dev/null +++ b/pkg/common/pool_key.go @@ -0,0 +1,23 @@ +/* + Copyright 2024, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package common + +// GetPoolKey builds a unique key for a pool from poolName and poolType +func GetPoolKey(poolName, poolType string) string { + if poolType == "" || poolType == PoolTypeIPPool { + // to avoid migration of the store, and to support downgrade + return poolName + } + return poolType + "/" + poolName +} diff --git a/pkg/ip/cidr.go b/pkg/ip/cidr.go index 0c66232..8bbbf59 100644 --- a/pkg/ip/cidr.go +++ b/pkg/ip/cidr.go @@ -147,6 +147,24 @@ func IsBroadcast(ip net.IP, network *net.IPNet) bool { return ip.Equal(masked) } +// IsPointToPointSubnet returns true if the network is point to point (/31 or /127) +func IsPointToPointSubnet(network *net.IPNet) bool { + ones, maskLen := network.Mask.Size() + return ones == maskLen-1 +} + +// LastIP returns the last IP of a subnet, excluding the broadcast if IPv4 (if not /31 net) +func LastIP(network *net.IPNet) net.IP { + var end net.IP + for i := 0; i < len(network.IP); i++ { + end = append(end, network.IP[i]|^network.Mask[i]) + } + if network.IP.To4() != nil && !IsPointToPointSubnet(network) { + end[3]-- + } + return end +} + // GetSubnetGen returns generator function that can be called multiple times // to generate subnet for the network with the prefix size. // The function always returns non-nil function. 
diff --git a/pkg/ip/cidr_test.go b/pkg/ip/cidr_test.go index 1f98499..f45f65d 100644 --- a/pkg/ip/cidr_test.go +++ b/pkg/ip/cidr_test.go @@ -374,4 +374,36 @@ var _ = Describe("CIDR functions", func() { Expect(gen().String()).To(Equal("::4/127")) }) }) + Context("IsPointToPointSubnet", func() { + It("/31", func() { + _, network, _ := net.ParseCIDR("192.168.1.0/31") + Expect(IsPointToPointSubnet(network)).To(BeTrue()) + }) + It("/127", func() { + _, network, _ := net.ParseCIDR("2002:0:0:1234::1/127") + Expect(IsPointToPointSubnet(network)).To(BeTrue()) + }) + It("/24", func() { + _, network, _ := net.ParseCIDR("192.168.1.0/24") + Expect(IsPointToPointSubnet(network)).To(BeFalse()) + }) + }) + Context("LastIP", func() { + It("/31", func() { + _, network, _ := net.ParseCIDR("192.168.1.0/31") + Expect(LastIP(network).String()).To(Equal("192.168.1.1")) + }) + It("/127", func() { + _, network, _ := net.ParseCIDR("2002:0:0:1234::0/127") + Expect(LastIP(network).String()).To(Equal("2002:0:0:1234::1")) + }) + It("/24", func() { + _, network, _ := net.ParseCIDR("192.168.1.0/24") + Expect(LastIP(network).String()).To(Equal("192.168.1.254")) + }) + It("/64", func() { + _, network, _ := net.ParseCIDR("2002:0:0:1234::0/64") + Expect(LastIP(network).String()).To(Equal("2002::1234:ffff:ffff:ffff:ffff")) + }) + }) }) diff --git a/pkg/ipam-controller/migrator/migrator.go b/pkg/ipam-controller/migrator/migrator.go index 55ac4c3..42688fd 100644 --- a/pkg/ipam-controller/migrator/migrator.go +++ b/pkg/ipam-controller/migrator/migrator.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/config" "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" ) @@ -196,7 +197,7 @@ func updateAllocations(ctx context.Context, c client.Client, continue } nodesToClearAnnotation.Insert(node.Name) - nodeIPPoolConfig := 
poolCfg.GetPoolByName(poolName) + nodeIPPoolConfig := poolCfg.GetPoolByKey(common.GetPoolKey(poolName, common.PoolTypeIPPool)) if nodeIPPoolConfig == nil { nodeLog.Info("skip loading data for pool from the node, pool not configured", "node", node.Name, "pool", poolName) continue diff --git a/pkg/ipam-controller/migrator/migrator_test.go b/pkg/ipam-controller/migrator/migrator_test.go index e1bac62..52a5661 100644 --- a/pkg/ipam-controller/migrator/migrator_test.go +++ b/pkg/ipam-controller/migrator/migrator_test.go @@ -86,7 +86,7 @@ func updateNode(node *corev1.Node) *corev1.Node { return node } -func getRangeFromNode(nodeName string) map[string]*pool.IPPool { +func getRangeFromNode(nodeName string) map[string]*pool.Pool { node := getNode(nodeName) poolCfg, err := pool.NewConfigReader(node) if err != nil { @@ -111,7 +111,7 @@ var _ = Describe("Controller Migrator", func() { By("Set annotation with valid ranges for node1") node1 := createNode(testNode1) - node1InitialRanges := map[string]*pool.IPPool{pool1Name: { + node1InitialRanges := map[string]*pool.Pool{pool1Name: { Name: pool1Name, Subnet: "192.168.0.0/16", StartIP: "192.168.0.11", @@ -129,7 +129,7 @@ var _ = Describe("Controller Migrator", func() { By("Set annotation with valid ranges for node2") node2 := createNode(testNode2) - node2InitialRanges := map[string]*pool.IPPool{pool1Name: { + node2InitialRanges := map[string]*pool.Pool{pool1Name: { Name: pool1Name, Subnet: "192.168.0.0/16", StartIP: "192.168.0.21", diff --git a/pkg/ipam-node/allocator/allocator.go b/pkg/ipam-node/allocator/allocator.go index c3fb5ae..2ee7259 100644 --- a/pkg/ipam-node/allocator/allocator.go +++ b/pkg/ipam-node/allocator/allocator.go @@ -39,17 +39,20 @@ type IPAllocator interface { } type allocator struct { - rangeSet *RangeSet - session storePkg.Session - poolName string + rangeSet *RangeSet + session storePkg.Session + poolKey string + exclusions *RangeSet } // NewIPAllocator create and initialize a new instance of IP allocator 
-func NewIPAllocator(s *RangeSet, poolName string, session storePkg.Session) IPAllocator { +func NewIPAllocator(s *RangeSet, exclusions *RangeSet, + poolKey string, session storePkg.Session) IPAllocator { return &allocator{ - rangeSet: s, - session: session, - poolName: poolName, + rangeSet: s, + session: session, + poolKey: poolKey, + exclusions: exclusions, } } @@ -64,7 +67,10 @@ func (a *allocator) Allocate(id string, ifName string, meta types.ReservationMet if reservedIP == nil { return nil, ErrNoFreeAddresses } - err := a.session.Reserve(a.poolName, id, ifName, meta, reservedIP.IP) + if a.exclusions != nil && a.exclusions.Contains(reservedIP.IP) { + continue + } + err := a.session.Reserve(a.poolKey, id, ifName, meta, reservedIP.IP) if err == nil { break } @@ -105,7 +111,7 @@ func (a *allocator) getIter() *RangeIter { // We might get a last reserved IP that is wrong if the range indexes changed. // This is not critical, we just lose round-robin this one time. - lastReservedIP := a.session.GetLastReservedIP(a.poolName) + lastReservedIP := a.session.GetLastReservedIP(a.poolKey) if lastReservedIP != nil { startFromLastReservedIP = a.rangeSet.Contains(lastReservedIP) } diff --git a/pkg/ipam-node/allocator/allocator_test.go b/pkg/ipam-node/allocator/allocator_test.go index b9adfe9..b92ca5e 100644 --- a/pkg/ipam-node/allocator/allocator_test.go +++ b/pkg/ipam-node/allocator/allocator_test.go @@ -50,7 +50,7 @@ func mkAlloc(session storePkg.Session) allocator.IPAllocator { allocator.Range{Subnet: mustSubnet("192.168.1.0/29"), Gateway: net.ParseIP("192.168.1.1")}, } Expect(p.Canonicalize()).NotTo(HaveOccurred()) - return allocator.NewIPAllocator(&p, testPoolName, session) + return allocator.NewIPAllocator(&p, nil, testPoolName, session) } func newAllocatorWithMultiRanges(session storePkg.Session) allocator.IPAllocator { @@ -59,7 +59,7 @@ func newAllocatorWithMultiRanges(session storePkg.Session) allocator.IPAllocator allocator.Range{RangeStart: net.IP{192, 168, 2, 0}, 
RangeEnd: net.IP{192, 168, 2, 3}, Subnet: mustSubnet("192.168.2.0/30")}, } Expect(p.Canonicalize()).NotTo(HaveOccurred()) - return allocator.NewIPAllocator(&p, testPoolName, session) + return allocator.NewIPAllocator(&p, nil, testPoolName, session) } func (t AllocatorTestCase) run(idx int, session storePkg.Session) (*current.IPConfig, error) { @@ -79,7 +79,7 @@ func (t AllocatorTestCase) run(idx int, session storePkg.Session) (*current.IPCo session.SetLastReservedIP(testPoolName, net.ParseIP(t.lastIP)) Expect(p.Canonicalize()).To(Succeed()) - alloc := allocator.NewIPAllocator(&p, testPoolName, session) + alloc := allocator.NewIPAllocator(&p, nil, testPoolName, session) return alloc.Allocate(testContainerID, testIFName, types.ReservationMetadata{}) } @@ -391,11 +391,55 @@ var _ = Describe("allocator", func() { allocator.Range{Subnet: mustSubnet("192.168.1.4/30")}, } Expect(p.Canonicalize()).NotTo(HaveOccurred()) - a := allocator.NewIPAllocator(&p, testPoolName, session) + a := allocator.NewIPAllocator(&p, nil, testPoolName, session) // get range iterator and do the first Next checkAlloc(a, "0", net.IP{192, 168, 1, 1}) checkAlloc(a, "1", net.IP{192, 168, 1, 2}) checkAlloc(a, "2", net.IP{192, 168, 1, 5}) }) }) + Context("point to point ranges", func() { + It("should allocate two IPs", func() { + session, err := storePkg.New( + filepath.Join(GinkgoT().TempDir(), "test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = session.Commit() + }() + p := allocator.RangeSet{ + allocator.Range{Subnet: mustSubnet("192.168.1.0/31")}, + } + Expect(p.Canonicalize()).NotTo(HaveOccurred()) + a := allocator.NewIPAllocator(&p, nil, testPoolName, session) + // get range iterator and do the first Next + checkAlloc(a, "0", net.IP{192, 168, 1, 0}) + checkAlloc(a, "1", net.IP{192, 168, 1, 1}) + }) + }) + Context("IP address exclusion", func() { + It("should exclude IPs", func() { + session, err := storePkg.New( + filepath.Join(GinkgoT().TempDir(), 
"test_store")).Open(context.Background()) + Expect(err).NotTo(HaveOccurred()) + defer func() { + _ = session.Commit() + }() + p := allocator.RangeSet{ + allocator.Range{Subnet: mustSubnet("192.168.0.0/29")}, + } + Expect(p.Canonicalize()).NotTo(HaveOccurred()) + e := allocator.RangeSet{ + allocator.Range{Subnet: mustSubnet("192.168.0.0/29"), + RangeStart: net.ParseIP("192.168.0.2"), RangeEnd: net.ParseIP("192.168.0.3")}, + allocator.Range{Subnet: mustSubnet("192.168.0.0/29"), + RangeStart: net.ParseIP("192.168.0.5"), RangeEnd: net.ParseIP("192.168.0.5")}, + } + Expect(e.Canonicalize()).NotTo(HaveOccurred()) + a := allocator.NewIPAllocator(&p, &e, testPoolName, session) + // get range iterator and do the first Next + checkAlloc(a, "0", net.IP{192, 168, 0, 1}) + checkAlloc(a, "1", net.IP{192, 168, 0, 4}) + checkAlloc(a, "2", net.IP{192, 168, 0, 6}) + }) + }) }) diff --git a/pkg/ipam-node/allocator/range.go b/pkg/ipam-node/allocator/range.go index 9cecb38..a547a6c 100644 --- a/pkg/ipam-node/allocator/range.go +++ b/pkg/ipam-node/allocator/range.go @@ -38,10 +38,9 @@ func (r *Range) Canonicalize() error { return err } - // Can't create an allocator for a network with no addresses, eg - // a /32 or /31 + // Can't create an allocator for /32 or /128 networks (single IP) ones, masklen := r.Subnet.Mask.Size() - if ones > masklen-2 { + if ones > masklen-1 { return fmt.Errorf("network %s too small to allocate from", (*net.IPNet)(&r.Subnet).String()) } @@ -75,9 +74,13 @@ func (r *Range) Canonicalize() error { return fmt.Errorf("RangeStart %s not in network %s", r.RangeStart.String(), (*net.IPNet)(&r.Subnet).String()) } } else { - r.RangeStart = ip.NextIP(r.Subnet.IP) - if r.RangeStart == nil { - return fmt.Errorf("computed RangeStart is not a valid IP") + if ip.IsPointToPointSubnet((*net.IPNet)(&r.Subnet)) { + r.RangeStart = r.Subnet.IP + } else { + r.RangeStart = ip.NextIP(r.Subnet.IP) + if r.RangeStart == nil { + return fmt.Errorf("computed RangeStart is not a valid IP") + 
} } } @@ -92,7 +95,7 @@ func (r *Range) Canonicalize() error { return fmt.Errorf("RangeEnd %s not in network %s", r.RangeEnd.String(), (*net.IPNet)(&r.Subnet).String()) } } else { - r.RangeEnd = lastIP(r.Subnet) + r.RangeEnd = ip.LastIP((*net.IPNet)(&r.Subnet)) } return nil @@ -159,16 +162,3 @@ func CanonicalizeIP(addr *net.IP) error { *addr = normalizedIP return nil } - -// Determine the last IP of a subnet, excluding the broadcast if IPv4 -func lastIP(subnet types.IPNet) net.IP { - var end net.IP - for i := 0; i < len(subnet.IP); i++ { - end = append(end, subnet.IP[i]|^subnet.Mask[i]) - } - if subnet.IP.To4() != nil { - end[3]-- - } - - return end -} diff --git a/pkg/ipam-node/allocator/range_test.go b/pkg/ipam-node/allocator/range_test.go index 671e9bb..335e3a2 100644 --- a/pkg/ipam-node/allocator/range_test.go +++ b/pkg/ipam-node/allocator/range_test.go @@ -52,6 +52,19 @@ var _ = Describe("IP ranges", func() { RangeEnd: net.IP{192, 0, 2, 126}, })) }) + It("should generate sane defaults for a /31 ipv4 subnet", func() { + subnetStr := "192.0.2.0/31" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.IP{192, 0, 2, 0}, + RangeEnd: net.IP{192, 0, 2, 1}, + })) + }) It("should reject ipv4 subnet using a masked address", func() { subnetStr := "192.0.2.12/24" r := allocator.Range{Subnet: mustSubnet(subnetStr)} @@ -98,10 +111,24 @@ var _ = Describe("IP ranges", func() { })) }) + It("should generate sane defaults for /127 ipv6 prefix", func() { + subnetStr := "2001:DB8:1::/127" + r := allocator.Range{Subnet: mustSubnet(subnetStr)} + + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.ParseIP("2001:DB8:1::0"), + RangeEnd: net.ParseIP("2001:DB8:1::1"), + })) + }) + It("Should reject a network that's too small", 
func() { - r := allocator.Range{Subnet: mustSubnet("192.0.2.0/31")} + r := allocator.Range{Subnet: mustSubnet("192.0.2.0/32")} err := r.Canonicalize() - Expect(err).Should(MatchError("network 192.0.2.0/31 too small to allocate from")) + Expect(err).Should(MatchError("network 192.0.2.0/32 too small to allocate from")) }) It("should reject invalid RangeStart and RangeEnd specifications", func() { @@ -123,6 +150,25 @@ var _ = Describe("IP ranges", func() { Expect(err).Should(MatchError("RangeStart 192.0.2.50 not in network 192.0.2.0/24")) }) + It("should reject invalid RangeStart and RangeEnd for point to point networks", func() { + subnetStr := "192.0.2.2/31" + r := allocator.Range{Subnet: mustSubnet(subnetStr), RangeStart: net.ParseIP("192.0.2.1")} + err := r.Canonicalize() + Expect(err).Should(MatchError("RangeStart 192.0.2.1 not in network 192.0.2.2/31")) + + r = allocator.Range{Subnet: mustSubnet(subnetStr), RangeEnd: net.ParseIP("192.0.2.4")} + err = r.Canonicalize() + Expect(err).Should(MatchError("RangeEnd 192.0.2.4 not in network 192.0.2.2/31")) + + r = allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.ParseIP("192.0.2.3"), + RangeEnd: net.ParseIP("192.0.2.2"), + } + err = r.Canonicalize() + Expect(err).Should(MatchError("RangeStart 192.0.2.3 not in network 192.0.2.2/31")) + }) + It("should parse all fields correctly", func() { subnetStr := "192.0.2.0/24" r := allocator.Range{ @@ -142,6 +188,40 @@ var _ = Describe("IP ranges", func() { })) }) + It("should parse /31 and /127 correctly", func() { + subnetStr := "192.0.2.2/31" + r := allocator.Range{ + Subnet: mustSubnet(subnetStr), + RangeStart: net.ParseIP("192.0.2.2"), + RangeEnd: net.ParseIP("192.0.2.3"), + Gateway: net.ParseIP("192.0.2.3"), + } + Expect(r.Canonicalize()).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: networkSubnet(subnetStr), + RangeStart: net.IP{192, 0, 2, 2}, + RangeEnd: net.IP{192, 0, 2, 3}, + Gateway: net.IP{192, 0, 2, 3}, + })) + + 
subnetV6Str := "2001:DB8:1::4/127" + r = allocator.Range{ + Subnet: mustSubnet(subnetV6Str), + RangeStart: net.ParseIP("2001:DB8:1::4"), + RangeEnd: net.ParseIP("2001:DB8:1::5"), + Gateway: net.ParseIP("2001:DB8:1::4"), + } + Expect(r.Canonicalize()).NotTo(HaveOccurred()) + + Expect(r).To(Equal(allocator.Range{ + Subnet: mustSubnet(subnetV6Str), + RangeStart: net.ParseIP("2001:DB8:1::4"), + RangeEnd: net.ParseIP("2001:DB8:1::5"), + Gateway: net.ParseIP("2001:DB8:1::4"), + })) + }) + It("should accept v4 IPs in range and reject IPs out of range", func() { r := allocator.Range{ Subnet: mustSubnet("192.0.2.0/24"), @@ -160,6 +240,34 @@ var _ = Describe("IP ranges", func() { Expect(r.Contains(net.ParseIP("192.0.2.51"))).Should(BeFalse()) }) + It("/31 network Contains", func() { + r := allocator.Range{ + Subnet: mustSubnet("192.0.2.2/31"), + } + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r.Contains(net.ParseIP("192.0.2.0"))).Should(BeFalse()) + Expect(r.Contains(net.ParseIP("192.0.2.1"))).Should(BeFalse()) + Expect(r.Contains(net.ParseIP("192.0.2.2"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("192.0.2.3"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("192.0.2.4"))).Should(BeFalse()) + }) + + It("/127 network Contains", func() { + r := allocator.Range{ + Subnet: mustSubnet("2001:DB8:1::2/127"), + } + err := r.Canonicalize() + Expect(err).NotTo(HaveOccurred()) + + Expect(r.Contains(net.ParseIP("2001:DB8:1::0"))).Should(BeFalse()) + Expect(r.Contains(net.ParseIP("2001:DB8:1::1"))).Should(BeFalse()) + Expect(r.Contains(net.ParseIP("2001:DB8:1::2"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("2001:DB8:1::3"))).Should(BeTrue()) + Expect(r.Contains(net.ParseIP("2001:DB8:1::4"))).Should(BeFalse()) + }) + It("should accept v6 IPs in range and reject IPs out of range", func() { r := allocator.Range{ Subnet: mustSubnet("2001:DB8:1::/64"), diff --git a/pkg/ipam-node/cleaner/cleaner.go b/pkg/ipam-node/cleaner/cleaner.go index 
a2c1f4f..b1d79e5 100644 --- a/pkg/ipam-node/cleaner/cleaner.go +++ b/pkg/ipam-node/cleaner/cleaner.go @@ -71,7 +71,7 @@ type cleaner struct { } func (c *cleaner) Start(ctx context.Context) { - logger := logr.FromContextOrDiscard(ctx).WithName("cleaner") + logger := logr.FromContextOrDiscard(ctx) for { loopLogger := logger.WithValues("checkID", uuid.NewString()) loopLogger.Info("check for stale IPs") @@ -96,17 +96,17 @@ func (c *cleaner) loop(ctx context.Context) error { } allReservations := map[string]struct{}{} emptyPools := []string{} - for _, poolName := range session.ListPools() { - poolReservations := session.ListReservations(poolName) + for _, poolKey := range session.ListPools() { + poolReservations := session.ListReservations(poolKey) if len(poolReservations) == 0 { - emptyPools = append(emptyPools, poolName) + emptyPools = append(emptyPools, poolKey) continue } for _, reservation := range poolReservations { - resLogger := logger.WithValues("pool", poolName, + resLogger := logger.WithValues("poolKey", poolKey, "container_id", reservation.ContainerID, "interface_name", reservation.InterfaceName) - key := c.getStaleAllocKey(poolName, reservation) - allReservations[key] = struct{}{} + staleAllocKey := c.getStaleAllocKey(poolKey, reservation) + allReservations[staleAllocKey] = struct{}{} if reservation.Metadata.PodName == "" || reservation.Metadata.PodNamespace == "" { resLogger.V(2).Info("reservation has no required metadata fields, skip") continue @@ -128,10 +128,10 @@ func (c *cleaner) loop(ctx context.Context) error { } } if found { - delete(c.staleAllocations, key) + delete(c.staleAllocations, staleAllocKey) } else { - c.staleAllocations[key]++ - resLogger.V(2).Info("pod not found, increase stale counter", "value", c.staleAllocations[key]) + c.staleAllocations[staleAllocKey]++ + resLogger.V(2).Info("pod not found, increase stale counter", "value", c.staleAllocations[staleAllocKey]) } } } @@ -145,15 +145,15 @@ func (c *cleaner) loop(ctx context.Context) 
error { // release reservations which were marked as stale multiple times if count > c.checkCountBeforeRelease { keyFields := strings.SplitN(k, "|", 3) - poolName, containerID, ifName := keyFields[0], keyFields[1], keyFields[2] - logger.Info("stale reservation released", "poolName", poolName, + poolKey, containerID, ifName := keyFields[0], keyFields[1], keyFields[2] + logger.Info("stale reservation released", "poolKey", poolKey, "container_id", containerID, "interface_name", ifName) - session.ReleaseReservationByID(poolName, containerID, ifName) + session.ReleaseReservationByID(poolKey, containerID, ifName) } } // remove empty pools if they don't have configuration in the k8s API for _, emptyPool := range emptyPools { - if p := c.poolConfReader.GetPoolByName(emptyPool); p == nil { + if p := c.poolConfReader.GetPoolByKey(emptyPool); p == nil { session.RemovePool(emptyPool) } } diff --git a/pkg/ipam-node/cleaner/cleaner_test.go b/pkg/ipam-node/cleaner/cleaner_test.go index 8cc56f5..bedb723 100644 --- a/pkg/ipam-node/cleaner/cleaner_test.go +++ b/pkg/ipam-node/cleaner/cleaner_test.go @@ -64,9 +64,9 @@ var _ = Describe("Cleaner", func() { poolManager := poolMockPkg.NewManager(GinkgoT()) // pool2 has no config in the k8s API - poolManager.On("GetPoolByName", testPool2).Return(nil) + poolManager.On("GetPoolByKey", testPool2).Return(nil) // pool3 has config in the k8s API - poolManager.On("GetPoolByName", testPool3).Return(&poolPkg.IPPool{}) + poolManager.On("GetPoolByKey", testPool3).Return(&poolPkg.Pool{}) session, err := store.Open(ctx) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool.go b/pkg/ipam-node/controllers/cidrpool/cidrpool.go new file mode 100644 index 0000000..de825b6 --- /dev/null +++ b/pkg/ipam-node/controllers/cidrpool/cidrpool.go @@ -0,0 +1,132 @@ +/* + Copyright 2023, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance 
with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controllers + +import ( + "context" + "net" + + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/ip" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" +) + +// CIDRPoolReconciler reconciles CIDRPool objects +type CIDRPoolReconciler struct { + PoolManager pool.Manager + client.Client + Scheme *runtime.Scheme + NodeName string +} + +// Reconcile contains logic to sync CIDRPool objects +func (r *CIDRPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqLog := log.FromContext(ctx) + cidrPool := &ipamv1alpha1.CIDRPool{} + poolKey := common.GetPoolKey(req.Name, common.PoolTypeCIDRPool) + err := r.Client.Get(ctx, req.NamespacedName, cidrPool) + if err != nil { + if apiErrors.IsNotFound(err) { + reqLog.Info("CIDRPool not found, removing from PoolManager") + r.PoolManager.RemovePool(poolKey) + return ctrl.Result{}, nil + } + reqLog.Error(err, "failed to get CIDRPool object from the cache") + return ctrl.Result{}, err + } + reqLog.Info("Notification on CIDRPool", "name", cidrPool.Name) + + if errList := cidrPool.Validate(); len(errList) > 0 { + reqLog.Info("CIDRPool has invalid config, ignore the pool", "name", + cidrPool.Name, "reason", errList.ToAggregate().Error()) + 
r.PoolManager.RemovePool(poolKey) + return ctrl.Result{}, nil + } + + found := false + for _, alloc := range cidrPool.Status.Allocations { + if alloc.NodeName == r.NodeName { + if errList := alloc.Validate(cidrPool); len(errList) > 0 { + reqLog.Info("CIDRPool has invalid allocation for the node, ignore the pool", + "name", cidrPool.Name, "reason", errList.ToAggregate().Error()) + r.PoolManager.RemovePool(poolKey) + return ctrl.Result{}, nil + } + _, nodeSubnet, _ := net.ParseCIDR(alloc.Prefix) + startIP := ip.NextIP(nodeSubnet.IP) + if ip.IsPointToPointSubnet(nodeSubnet) { + startIP = nodeSubnet.IP + } + endIP := ip.LastIP(nodeSubnet) + cidrPool := &pool.Pool{ + Name: cidrPool.Name, + Subnet: alloc.Prefix, + Gateway: alloc.Gateway, + StartIP: startIP.String(), + EndIP: endIP.String(), + Exclusions: buildExclusions(cidrPool.Spec.Exclusions, nodeSubnet, startIP, endIP), + } + reqLog.Info("CIDRPool config updated", "name", cidrPool.Name) + r.PoolManager.UpdatePool(poolKey, cidrPool) + found = true + break + } + } + if !found { + reqLog.Info("CIDRPool config removed", "name", cidrPool.Name, "reason", "allocation not found") + r.PoolManager.RemovePool(poolKey) + } + return ctrl.Result{}, nil +} + +func buildExclusions(ranges []ipamv1alpha1.ExcludeRange, + nodeSubnet *net.IPNet, firstIP net.IP, lastIP net.IP) []pool.ExclusionRange { + exclusions := make([]pool.ExclusionRange, 0, len(ranges)) + for _, r := range ranges { + rangeStartIP := net.ParseIP(r.StartIP) + rangeEndIP := net.ParseIP(r.EndIP) + if rangeEndIP == nil || rangeStartIP == nil { + continue + } + containsStart := nodeSubnet.Contains(rangeStartIP) + containsEnd := nodeSubnet.Contains(rangeEndIP) + if !containsStart && !containsEnd { + // the range is not related to the node's subnet, + continue + } + exlRange := pool.ExclusionRange{StartIP: r.StartIP, EndIP: r.EndIP} + if !containsStart { + exlRange.StartIP = firstIP.String() + } + if !containsEnd { + exlRange.EndIP = lastIP.String() + } + exclusions = 
append(exclusions, exlRange) + } + return exclusions +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CIDRPoolReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&ipamv1alpha1.CIDRPool{}). + Complete(r) +} diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool_suite_test.go b/pkg/ipam-node/controllers/cidrpool/cidrpool_suite_test.go new file mode 100644 index 0000000..3965762 --- /dev/null +++ b/pkg/ipam-node/controllers/cidrpool/cidrpool_suite_test.go @@ -0,0 +1,26 @@ +/* + Copyright 2024, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controllers + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestController(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller suite") +} diff --git a/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go b/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go new file mode 100644 index 0000000..6da5667 --- /dev/null +++ b/pkg/ipam-node/controllers/cidrpool/cidrpool_test.go @@ -0,0 +1,70 @@ +/* + Copyright 2024, NVIDIA CORPORATION & AFFILIATES + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package controllers + +import ( + "net" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" +) + +var _ = Describe("CIDRPool", func() { + DescribeTable("buildExclusions", + func(exclusions []ipamv1alpha1.ExcludeRange, nodeSubnet string, firstIP string, lastIP string, result []pool.ExclusionRange) { + _, subnet, _ := net.ParseCIDR(nodeSubnet) + first := net.ParseIP(firstIP) + last := net.ParseIP(lastIP) + Expect(buildExclusions(exclusions, subnet, first, last)).To(Equal(result)) + }, + Entry("start and end are part of the subnet", + []ipamv1alpha1.ExcludeRange{ + {StartIP: "192.168.0.10", EndIP: "192.168.0.20"}, + {StartIP: "192.168.0.30", EndIP: "192.168.0.40"}}, + "192.168.0.0/24", "192.168.0.1", "192.168.0.254", + []pool.ExclusionRange{ + {StartIP: "192.168.0.10", EndIP: "192.168.0.20"}, + {StartIP: "192.168.0.30", EndIP: "192.168.0.40"}, + }, + ), + Entry("start and end are out of the subnet", + []ipamv1alpha1.ExcludeRange{ + {StartIP: "192.168.100.10", EndIP: "192.168.100.20"}, + {StartIP: "192.168.0.30", EndIP: "192.168.0.40"}}, + "192.168.0.0/24", "192.168.0.1", "192.168.0.254", + []pool.ExclusionRange{ + {StartIP: "192.168.0.30", EndIP: "192.168.0.40"}, + }, + ), + Entry("start is out of the subnet", + []ipamv1alpha1.ExcludeRange{ + {StartIP: "192.168.0.30", EndIP: "192.168.1.40"}}, + "192.168.1.0/24", "192.168.1.1", "192.168.1.254", + []pool.ExclusionRange{ + {StartIP: "192.168.1.1", EndIP: "192.168.1.40"}, + }, + ), + Entry("end 
is out of the subnet", + []ipamv1alpha1.ExcludeRange{ + {StartIP: "192.168.1.30", EndIP: "192.168.2.40"}}, + "192.168.1.0/24", "192.168.1.1", "192.168.1.254", + []pool.ExclusionRange{ + {StartIP: "192.168.1.30", EndIP: "192.168.1.254"}, + }, + ), + ) +}) diff --git a/pkg/ipam-node/controllers/ippool/ippool.go b/pkg/ipam-node/controllers/ippool/ippool.go index 139ab9e..5ed6609 100644 --- a/pkg/ipam-node/controllers/ippool/ippool.go +++ b/pkg/ipam-node/controllers/ippool/ippool.go @@ -23,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ipamv1alpha1 "github.com/Mellanox/nvidia-k8s-ipam/api/v1alpha1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" ) @@ -38,11 +39,12 @@ type IPPoolReconciler struct { func (r *IPPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLog := log.FromContext(ctx) ipPool := &ipamv1alpha1.IPPool{} + poolKey := common.GetPoolKey(req.Name, common.PoolTypeIPPool) err := r.Client.Get(ctx, req.NamespacedName, ipPool) if err != nil { if apiErrors.IsNotFound(err) { reqLog.Info("IPPool not found, removing from PoolManager") - r.PoolManager.RemovePool(req.Name) + r.PoolManager.RemovePool(poolKey) return ctrl.Result{}, nil } reqLog.Error(err, "failed to get IPPool object from the cache") @@ -50,22 +52,23 @@ func (r *IPPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } reqLog.Info("Notification on IPPool", "name", ipPool.Name) found := false + for _, alloc := range ipPool.Status.Allocations { if alloc.NodeName == r.NodeName { - ipPool := &pool.IPPool{ + ipPool := &pool.Pool{ Name: ipPool.Name, Subnet: ipPool.Spec.Subnet, Gateway: ipPool.Spec.Gateway, StartIP: alloc.StartIP, EndIP: alloc.EndIP, } - r.PoolManager.UpdatePool(ipPool) + r.PoolManager.UpdatePool(poolKey, ipPool) found = true break } } if !found { - r.PoolManager.RemovePool(req.Name) + r.PoolManager.RemovePool(poolKey) } return ctrl.Result{}, nil } diff --git 
a/pkg/ipam-node/handlers/allocate.go b/pkg/ipam-node/handlers/allocate.go index d411863..e08137c 100644 --- a/pkg/ipam-node/handlers/allocate.go +++ b/pkg/ipam-node/handlers/allocate.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/status" nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" @@ -40,7 +41,7 @@ func (h *Handlers) Allocate(ctx context.Context, req *nodev1.AllocateRequest) (* if err := validateReq(req); err != nil { return nil, err } - params := req.Parameters + params := setDefaultsToParams(req.Parameters) store, err := h.openStore(ctx) if err != nil { return nil, err @@ -55,8 +56,9 @@ func (h *Handlers) Allocate(ctx context.Context, req *nodev1.AllocateRequest) (* resp := &nodev1.AllocateResponse{} for _, r := range result { allocationInfo := &nodev1.AllocationInfo{ - Pool: r.Pool, - Ip: r.Address.String(), + Pool: r.Pool, + Ip: r.Address.String(), + PoolType: params.PoolType, } if r.Gateway != nil { allocationInfo.Gateway = r.Gateway.String() @@ -76,9 +78,9 @@ func (h *Handlers) allocate(reqLog logr.Logger, session storePkg.Session, params *nodev1.IPAMParameters) ([]PoolAlloc, error) { var err error result := make([]PoolAlloc, 0, len(params.Pools)) - for _, pool := range params.Pools { + for _, poolName := range params.Pools { var alloc PoolAlloc - alloc, err = h.allocateInPool(pool, reqLog, session, params) + alloc, err = h.allocateInPool(poolName, reqLog, session, params) if err != nil { break } @@ -91,25 +93,28 @@ func (h *Handlers) allocate(reqLog logr.Logger, return result, nil } -func (h *Handlers) allocateInPool(pool string, reqLog logr.Logger, +func (h *Handlers) allocateInPool(poolName string, reqLog logr.Logger, session storePkg.Session, params *nodev1.IPAMParameters) (PoolAlloc, 
error) { - poolLog := reqLog.WithValues("pool", pool) + poolType := poolTypeAsString(params.PoolType) + poolLog := reqLog.WithValues("pool", poolName, "poolType", poolType) + poolKey := common.GetPoolKey(poolName, poolType) - poolCfg := h.poolConfReader.GetPoolByName(pool) + poolCfg := h.poolConfReader.GetPoolByKey(poolKey) if poolCfg == nil { - return PoolAlloc{}, status.Errorf(codes.NotFound, "configuration for pool %s not found", pool) + return PoolAlloc{}, status.Errorf(codes.NotFound, + "configuration for pool \"%s\", poolType \"%s\" not found", poolName, poolType) } rangeStart := net.ParseIP(poolCfg.StartIP) if rangeStart == nil { - return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid rangeStart") + return PoolAlloc{}, poolCfgError(poolLog, poolName, poolType, "invalid rangeStart") } rangeEnd := net.ParseIP(poolCfg.EndIP) if rangeEnd == nil { - return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid rangeEnd") + return PoolAlloc{}, poolCfgError(poolLog, poolName, poolType, "invalid rangeEnd") } _, subnet, err := net.ParseCIDR(poolCfg.Subnet) if err != nil || subnet == nil || subnet.IP == nil || subnet.Mask == nil { - return PoolAlloc{}, poolCfgError(poolLog, pool, "invalid subnet") + return PoolAlloc{}, poolCfgError(poolLog, poolName, poolType, "invalid subnet") } rangeSet := &allocator.RangeSet{allocator.Range{ RangeStart: rangeStart, @@ -118,10 +123,24 @@ func (h *Handlers) allocateInPool(pool string, reqLog logr.Logger, Gateway: net.ParseIP(poolCfg.Gateway), }} if err := rangeSet.Canonicalize(); err != nil { - return PoolAlloc{}, poolCfgError(poolLog, pool, + return PoolAlloc{}, poolCfgError(poolLog, poolName, poolType, fmt.Sprintf("invalid range config: %s", err.Error())) } - alloc := h.getAllocFunc(rangeSet, pool, session) + exclusionRangeSet := make(allocator.RangeSet, 0, len(poolCfg.Exclusions)) + for _, e := range poolCfg.Exclusions { + exclusionRangeSet = append(exclusionRangeSet, allocator.Range{ + Subnet: cniTypes.IPNet(*subnet), + RangeStart: 
net.ParseIP(e.StartIP), + RangeEnd: net.ParseIP(e.EndIP), + }) + } + if len(exclusionRangeSet) > 0 { + if err := exclusionRangeSet.Canonicalize(); err != nil { + return PoolAlloc{}, poolCfgError(poolLog, poolName, poolType, + fmt.Sprintf("invalid exclusion range config: %s", err.Error())) + } + } + alloc := h.getAllocFunc(rangeSet, &exclusionRangeSet, poolKey, session) allocMeta := types.ReservationMetadata{ CreateTime: time.Now().Format(time.RFC3339Nano), PoolConfigSnapshot: poolCfg.String(), @@ -137,23 +156,26 @@ func (h *Handlers) allocateInPool(pool string, reqLog logr.Logger, poolLog.Error(err, "failed to allocate IP address") if errors.Is(err, storePkg.ErrReservationAlreadyExist) { return PoolAlloc{}, status.Errorf(codes.AlreadyExists, - "allocation already exist in the pool %s", pool) + "allocation already exist in the pool \"%s\", poolType \"%s\"", poolName, poolType) } if errors.Is(err, allocator.ErrNoFreeAddresses) { - return PoolAlloc{}, status.Errorf(codes.ResourceExhausted, "no free addresses in the pool %s", pool) + return PoolAlloc{}, status.Errorf(codes.ResourceExhausted, + "no free addresses in the pool \"%s\", poolType \"%s\"", + poolName, poolType) } - return PoolAlloc{}, status.Errorf(codes.Internal, "failed to allocate IP address in pool %s", pool) + return PoolAlloc{}, status.Errorf(codes.Internal, + "failed to allocate IP address in pool \"%s\", poolType \"%s\"", poolName, poolType) } poolLog.Info("IP address allocated", "allocation", result.String()) return PoolAlloc{ - Pool: pool, + Pool: poolName, IPConfig: result, }, nil } -func poolCfgError(reqLog logr.Logger, pool, reason string) error { - reqLog.Error(nil, "invalid pool config", "pool", pool, +func poolCfgError(reqLog logr.Logger, pool, poolType, reason string) error { + reqLog.Error(nil, "invalid pool config", "pool", pool, "poolType", poolType, "reason", reason) - return status.Errorf(codes.Internal, "invalid config for pool %s", pool) + return status.Errorf(codes.Internal, "invalid 
config for pool \"%s\", poolType \"%s\"", pool, poolType) } diff --git a/pkg/ipam-node/handlers/deallocate.go b/pkg/ipam-node/handlers/deallocate.go index 015fefa..5bde9e3 100644 --- a/pkg/ipam-node/handlers/deallocate.go +++ b/pkg/ipam-node/handlers/deallocate.go @@ -19,6 +19,7 @@ import ( "github.com/go-logr/logr" nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" ) // Deallocate is the handler for Deallocate GRPC endpoint @@ -29,7 +30,7 @@ func (h *Handlers) Deallocate( if err := validateReq(req); err != nil { return nil, err } - params := req.Parameters + params := setDefaultsToParams(req.Parameters) store, err := h.openStore(ctx) if err != nil { return nil, err @@ -37,8 +38,9 @@ func (h *Handlers) Deallocate( if err := checkReqIsCanceled(ctx); err != nil { return nil, h.closeSession(ctx, store, err) } - for _, p := range params.Pools { - store.ReleaseReservationByID(p, params.CniContainerid, params.CniIfname) + poolType := poolTypeAsString(params.PoolType) + for _, poolName := range params.Pools { + store.ReleaseReservationByID(common.GetPoolKey(poolName, poolType), params.CniContainerid, params.CniIfname) } if err := h.closeSession(ctx, store, nil); err != nil { return nil, err diff --git a/pkg/ipam-node/handlers/handlers.go b/pkg/ipam-node/handlers/handlers.go index c01cfd4..3298e94 100644 --- a/pkg/ipam-node/handlers/handlers.go +++ b/pkg/ipam-node/handlers/handlers.go @@ -22,12 +22,14 @@ import ( "google.golang.org/grpc/status" nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator" storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" poolPkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" ) -type GetAllocatorFunc = func(s *allocator.RangeSet, poolName string, session storePkg.Session) allocator.IPAllocator +type GetAllocatorFunc = func(s 
*allocator.RangeSet, exclusions *allocator.RangeSet, + poolKey string, session storePkg.Session) allocator.IPAllocator // New create and initialize new instance of grpc Handlers func New(poolConfReader poolPkg.ConfigReader, store storePkg.Store, getAllocFunc GetAllocatorFunc) *Handlers { @@ -114,12 +116,20 @@ func validateReq(req paramsGetter) error { return nil } +func setDefaultsToParams(params *nodev1.IPAMParameters) *nodev1.IPAMParameters { + if params.PoolType == nodev1.PoolType_POOL_TYPE_UNSPECIFIED { + params.PoolType = nodev1.PoolType_POOL_TYPE_IPPOOL + } + return params +} + func addFieldsToLogger(log logr.Logger, req paramsGetter) logr.Logger { params := req.GetParameters() if params == nil { return log } return log.WithValues("pools", params.Pools, + "pool_type", params.PoolType, "container_id", params.CniContainerid, "interface_name", params.CniIfname, "meta", params.Metadata.String(), @@ -134,3 +144,12 @@ func checkReqIsCanceled(ctx context.Context) error { return nil } } + +// converts poolType from grpc request to string representations, +// default/fallback value is IPPool +func poolTypeAsString(poolType nodev1.PoolType) string { + if poolType == nodev1.PoolType_POOL_TYPE_CIDRPOOL { + return common.PoolTypeCIDRPool + } + return common.PoolTypeIPPool +} diff --git a/pkg/ipam-node/handlers/handlers_test.go b/pkg/ipam-node/handlers/handlers_test.go index d75ae56..191470c 100644 --- a/pkg/ipam-node/handlers/handlers_test.go +++ b/pkg/ipam-node/handlers/handlers_test.go @@ -45,8 +45,8 @@ const ( testPodUID = "aaf0a0fc-9869-41ef-9214-48599f85b4fa" ) -func getPoolConfigs() map[string]*pool.IPPool { - return map[string]*pool.IPPool{ +func getPoolConfigs() map[string]*pool.Pool { + return map[string]*pool.Pool{ testPoolName1: { Name: testPoolName1, Subnet: "192.168.0.0/16", @@ -95,7 +95,8 @@ var _ = Describe("Handlers", func() { allocators = map[string]*allocatorMockPkg.IPAllocator{ testPoolName1: allocatorMockPkg.NewIPAllocator(GinkgoT()), testPoolName2: 
allocatorMockPkg.NewIPAllocator(GinkgoT())} - getAllocFunc = func(s *allocatorPkg.RangeSet, poolName string, store storePkg.Session) allocatorPkg.IPAllocator { + getAllocFunc = func(s *allocatorPkg.RangeSet, exclusions *allocatorPkg.RangeSet, + poolName string, store storePkg.Session) allocatorPkg.IPAllocator { return allocators[poolName] } handlers = handlersPkg.New(poolManager, store, getAllocFunc) @@ -103,8 +104,8 @@ var _ = Describe("Handlers", func() { It("Allocate succeed", func() { store.On("Open", mock.Anything).Return(session, nil) - poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) - poolManager.On("GetPoolByName", testPoolName2).Return(getPoolConfigs()[testPoolName2]) + poolManager.On("GetPoolByKey", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByKey", testPoolName2).Return(getPoolConfigs()[testPoolName2]) allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( ¤t.IPConfig{ Gateway: net.ParseIP("192.168.0.1"), @@ -135,7 +136,7 @@ var _ = Describe("Handlers", func() { }) It("Allocation failed: unknown pool", func() { store.On("Open", mock.Anything).Return(session, nil) - poolManager.On("GetPoolByName", testPoolName1).Return(nil) + poolManager.On("GetPoolByKey", testPoolName1).Return(nil) session.On("Cancel").Return() _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) Expect(status.Code(err) == codes.NotFound).To(BeTrue()) @@ -147,15 +148,15 @@ var _ = Describe("Handlers", func() { startIP := pool1Cfg.StartIP pool1Cfg.StartIP = endIP pool1Cfg.EndIP = startIP - poolManager.On("GetPoolByName", testPoolName1).Return(pool1Cfg) + poolManager.On("GetPoolByKey", testPoolName1).Return(pool1Cfg) session.On("Cancel").Return() _, err := handlers.Allocate(ctx, &nodev1.AllocateRequest{Parameters: getValidIPAMParams()}) Expect(status.Code(err) == codes.Internal).To(BeTrue()) }) It("Allocation failed: pool2 has no free IPs", func() { 
store.On("Open", mock.Anything).Return(session, nil) - poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) - poolManager.On("GetPoolByName", testPoolName2).Return(getPoolConfigs()[testPoolName2]) + poolManager.On("GetPoolByKey", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByKey", testPoolName2).Return(getPoolConfigs()[testPoolName2]) allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( ¤t.IPConfig{ Gateway: net.ParseIP("192.168.0.1"), @@ -170,7 +171,7 @@ var _ = Describe("Handlers", func() { }) It("Allocation failed: already allocated", func() { store.On("Open", mock.Anything).Return(session, nil) - poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByKey", testPoolName1).Return(getPoolConfigs()[testPoolName1]) allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( nil, storePkg.ErrReservationAlreadyExist) session.On("Cancel").Return() @@ -179,8 +180,8 @@ var _ = Describe("Handlers", func() { }) It("Allocation failed: failed to commit", func() { store.On("Open", mock.Anything).Return(session, nil) - poolManager.On("GetPoolByName", testPoolName1).Return(getPoolConfigs()[testPoolName1]) - poolManager.On("GetPoolByName", testPoolName2).Return(getPoolConfigs()[testPoolName2]) + poolManager.On("GetPoolByKey", testPoolName1).Return(getPoolConfigs()[testPoolName1]) + poolManager.On("GetPoolByKey", testPoolName2).Return(getPoolConfigs()[testPoolName2]) allocators[testPoolName1].On("Allocate", "id1", "net0", mock.Anything).Return( ¤t.IPConfig{ Gateway: net.ParseIP("192.168.0.1"), diff --git a/pkg/ipam-node/handlers/isallocated.go b/pkg/ipam-node/handlers/isallocated.go index cd7cb06..a4099ef 100644 --- a/pkg/ipam-node/handlers/isallocated.go +++ b/pkg/ipam-node/handlers/isallocated.go @@ -21,6 +21,7 @@ import ( "google.golang.org/grpc/status" nodev1 
"github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" ) // IsAllocated is the handler for IsAllocated GRPC endpoint @@ -31,7 +32,7 @@ func (h *Handlers) IsAllocated( if err := validateReq(req); err != nil { return nil, err } - params := req.Parameters + params := setDefaultsToParams(req.Parameters) store, err := h.openStore(ctx) if err != nil { return nil, err @@ -39,13 +40,13 @@ func (h *Handlers) IsAllocated( if err := checkReqIsCanceled(ctx); err != nil { return nil, h.closeSession(ctx, store, err) } - - for _, p := range params.Pools { - poolLog := reqLog.WithValues("pool", p) - res := store.GetReservationByID(p, params.CniContainerid, params.CniIfname) + poolType := poolTypeAsString(params.PoolType) + for _, poolName := range params.Pools { + poolLog := reqLog.WithValues("pool", poolName, "poolType", poolType) + res := store.GetReservationByID(common.GetPoolKey(poolName, poolType), params.CniContainerid, params.CniIfname) if res == nil { poolLog.Info("reservation not found") - err = status.Errorf(codes.NotFound, "reservation for pool %s not found", p) + err = status.Errorf(codes.NotFound, "reservation for pool \"%s\", poolType \"%s\" not found", poolName, poolType) break } reqLog.Info("reservation exist") diff --git a/pkg/ipam-node/migrator/migrator.go b/pkg/ipam-node/migrator/migrator.go index daee30e..f77f1ba 100644 --- a/pkg/ipam-node/migrator/migrator.go +++ b/pkg/ipam-node/migrator/migrator.go @@ -25,6 +25,7 @@ import ( "github.com/go-logr/logr" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ip" storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/types" @@ -140,14 +141,15 @@ func getWalkFunc(logger logr.Logger, session storePkg.Session) filepath.WalkFunc return fmt.Errorf("unexpected allocation format") } containerID, interfaceName := strings.Trim(allocData[0], "\r"), allocData[1] - 
if err := session.Reserve(poolName, containerID, interfaceName, types.ReservationMetadata{ - CreateTime: time.Now().Format(time.RFC3339Nano), - PodUUID: PlaceholderForUnknownField, - PodName: PlaceholderForUnknownField, - PodNamespace: PlaceholderForUnknownField, - DeviceID: PlaceholderForUnknownField, - PoolConfigSnapshot: PlaceholderForUnknownField, - }, addr); err != nil { + if err := session.Reserve(common.GetPoolKey(poolName, common.PoolTypeIPPool), + containerID, interfaceName, types.ReservationMetadata{ + CreateTime: time.Now().Format(time.RFC3339Nano), + PodUUID: PlaceholderForUnknownField, + PodName: PlaceholderForUnknownField, + PodNamespace: PlaceholderForUnknownField, + DeviceID: PlaceholderForUnknownField, + PoolConfigSnapshot: PlaceholderForUnknownField, + }, addr); err != nil { logger.V(1).Info("failed to reserve IP, ignore allocation", "pool", poolName, "ip", info.Name(), "reason", err.Error()) // ignore reservation error and skip the reservation diff --git a/pkg/ipam-node/migrator/migrator_test.go b/pkg/ipam-node/migrator/migrator_test.go index 1e04d61..e051146 100644 --- a/pkg/ipam-node/migrator/migrator_test.go +++ b/pkg/ipam-node/migrator/migrator_test.go @@ -25,6 +25,7 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/klog/v2" + "github.com/Mellanox/nvidia-k8s-ipam/pkg/common" "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/migrator" storePkg "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/store" ) @@ -84,10 +85,10 @@ var _ = Describe("Migrator", func() { defer session.Cancel() Expect(err).NotTo(HaveOccurred()) - Expect(session.GetLastReservedIP(testPool1)).NotTo(BeNil()) - Expect(session.GetLastReservedIP(testPool2)).NotTo(BeNil()) + Expect(session.GetLastReservedIP(common.GetPoolKey(testPool1, common.PoolTypeIPPool))).NotTo(BeNil()) + Expect(session.GetLastReservedIP(common.GetPoolKey(testPool2, common.PoolTypeIPPool))).NotTo(BeNil()) - reservationPool1 := session.GetReservationByID(testPool1, testID1, testIF1) + reservationPool1 := session.GetReservationByID(common.GetPoolKey(testPool1, common.PoolTypeIPPool), testID1, testIF1) Expect(reservationPool1).NotTo(BeNil()) Expect(reservationPool1.ContainerID).To(Equal(testID1)) Expect(reservationPool1.InterfaceName).To(Equal(testIF1)) @@ -96,7 +97,7 @@ var _ = Describe("Migrator", func() { Expect(reservationPool1.Metadata.PodNamespace).To(Equal(migrator.PlaceholderForUnknownField)) Expect(reservationPool1.Metadata.PodUUID).To(Equal(migrator.PlaceholderForUnknownField)) - reservationPool2 := session.GetReservationByID(testPool2, testID1, testIF2) + reservationPool2 := session.GetReservationByID(common.GetPoolKey(testPool2, common.PoolTypeIPPool), testID1, testIF2) Expect(reservationPool2).NotTo(BeNil()) // check that host local store is removed diff --git a/pkg/ipam-node/store/mocks/Session.go b/pkg/ipam-node/store/mocks/Session.go index ff07eeb..87fd7a7 100644 --- a/pkg/ipam-node/store/mocks/Session.go +++ b/pkg/ipam-node/store/mocks/Session.go @@ -96,13 +96,13 @@ func (_c *Session_Commit_Call) RunAndReturn(run func() error) *Session_Commit_Ca return _c } -// GetLastReservedIP provides a mock function with given fields: pool -func (_m *Session) GetLastReservedIP(pool string) net.IP { - ret := 
_m.Called(pool) +// GetLastReservedIP provides a mock function with given fields: poolKey +func (_m *Session) GetLastReservedIP(poolKey string) net.IP { + ret := _m.Called(poolKey) var r0 net.IP if rf, ok := ret.Get(0).(func(string) net.IP); ok { - r0 = rf(pool) + r0 = rf(poolKey) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(net.IP) @@ -118,12 +118,12 @@ type Session_GetLastReservedIP_Call struct { } // GetLastReservedIP is a helper method to define mock.On call -// - pool string -func (_e *Session_Expecter) GetLastReservedIP(pool interface{}) *Session_GetLastReservedIP_Call { - return &Session_GetLastReservedIP_Call{Call: _e.mock.On("GetLastReservedIP", pool)} +// - poolKey string +func (_e *Session_Expecter) GetLastReservedIP(poolKey interface{}) *Session_GetLastReservedIP_Call { + return &Session_GetLastReservedIP_Call{Call: _e.mock.On("GetLastReservedIP", poolKey)} } -func (_c *Session_GetLastReservedIP_Call) Run(run func(pool string)) *Session_GetLastReservedIP_Call { +func (_c *Session_GetLastReservedIP_Call) Run(run func(poolKey string)) *Session_GetLastReservedIP_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string)) }) @@ -140,13 +140,13 @@ func (_c *Session_GetLastReservedIP_Call) RunAndReturn(run func(string) net.IP) return _c } -// GetReservationByID provides a mock function with given fields: pool, id, ifName -func (_m *Session) GetReservationByID(pool string, id string, ifName string) *types.Reservation { - ret := _m.Called(pool, id, ifName) +// GetReservationByID provides a mock function with given fields: poolKey, id, ifName +func (_m *Session) GetReservationByID(poolKey string, id string, ifName string) *types.Reservation { + ret := _m.Called(poolKey, id, ifName) var r0 *types.Reservation if rf, ok := ret.Get(0).(func(string, string, string) *types.Reservation); ok { - r0 = rf(pool, id, ifName) + r0 = rf(poolKey, id, ifName) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.Reservation) @@ -162,14 +162,14 @@ type 
Session_GetReservationByID_Call struct { } // GetReservationByID is a helper method to define mock.On call -// - pool string +// - poolKey string // - id string // - ifName string -func (_e *Session_Expecter) GetReservationByID(pool interface{}, id interface{}, ifName interface{}) *Session_GetReservationByID_Call { - return &Session_GetReservationByID_Call{Call: _e.mock.On("GetReservationByID", pool, id, ifName)} +func (_e *Session_Expecter) GetReservationByID(poolKey interface{}, id interface{}, ifName interface{}) *Session_GetReservationByID_Call { + return &Session_GetReservationByID_Call{Call: _e.mock.On("GetReservationByID", poolKey, id, ifName)} } -func (_c *Session_GetReservationByID_Call) Run(run func(pool string, id string, ifName string)) *Session_GetReservationByID_Call { +func (_c *Session_GetReservationByID_Call) Run(run func(poolKey string, id string, ifName string)) *Session_GetReservationByID_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string), args[1].(string), args[2].(string)) }) @@ -229,13 +229,13 @@ func (_c *Session_ListPools_Call) RunAndReturn(run func() []string) *Session_Lis return _c } -// ListReservations provides a mock function with given fields: pool -func (_m *Session) ListReservations(pool string) []types.Reservation { - ret := _m.Called(pool) +// ListReservations provides a mock function with given fields: poolKey +func (_m *Session) ListReservations(poolKey string) []types.Reservation { + ret := _m.Called(poolKey) var r0 []types.Reservation if rf, ok := ret.Get(0).(func(string) []types.Reservation); ok { - r0 = rf(pool) + r0 = rf(poolKey) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]types.Reservation) @@ -251,12 +251,12 @@ type Session_ListReservations_Call struct { } // ListReservations is a helper method to define mock.On call -// - pool string -func (_e *Session_Expecter) ListReservations(pool interface{}) *Session_ListReservations_Call { - return &Session_ListReservations_Call{Call: 
_e.mock.On("ListReservations", pool)} +// - poolKey string +func (_e *Session_Expecter) ListReservations(poolKey interface{}) *Session_ListReservations_Call { + return &Session_ListReservations_Call{Call: _e.mock.On("ListReservations", poolKey)} } -func (_c *Session_ListReservations_Call) Run(run func(pool string)) *Session_ListReservations_Call { +func (_c *Session_ListReservations_Call) Run(run func(poolKey string)) *Session_ListReservations_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string)) }) @@ -273,9 +273,9 @@ func (_c *Session_ListReservations_Call) RunAndReturn(run func(string) []types.R return _c } -// ReleaseReservationByID provides a mock function with given fields: pool, id, ifName -func (_m *Session) ReleaseReservationByID(pool string, id string, ifName string) { - _m.Called(pool, id, ifName) +// ReleaseReservationByID provides a mock function with given fields: poolKey, id, ifName +func (_m *Session) ReleaseReservationByID(poolKey string, id string, ifName string) { + _m.Called(poolKey, id, ifName) } // Session_ReleaseReservationByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReleaseReservationByID' @@ -284,14 +284,14 @@ type Session_ReleaseReservationByID_Call struct { } // ReleaseReservationByID is a helper method to define mock.On call -// - pool string +// - poolKey string // - id string // - ifName string -func (_e *Session_Expecter) ReleaseReservationByID(pool interface{}, id interface{}, ifName interface{}) *Session_ReleaseReservationByID_Call { - return &Session_ReleaseReservationByID_Call{Call: _e.mock.On("ReleaseReservationByID", pool, id, ifName)} +func (_e *Session_Expecter) ReleaseReservationByID(poolKey interface{}, id interface{}, ifName interface{}) *Session_ReleaseReservationByID_Call { + return &Session_ReleaseReservationByID_Call{Call: _e.mock.On("ReleaseReservationByID", poolKey, id, ifName)} } -func (_c *Session_ReleaseReservationByID_Call) Run(run func(pool 
string, id string, ifName string)) *Session_ReleaseReservationByID_Call { +func (_c *Session_ReleaseReservationByID_Call) Run(run func(poolKey string, id string, ifName string)) *Session_ReleaseReservationByID_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string), args[1].(string), args[2].(string)) }) @@ -308,9 +308,9 @@ func (_c *Session_ReleaseReservationByID_Call) RunAndReturn(run func(string, str return _c } -// RemovePool provides a mock function with given fields: pool -func (_m *Session) RemovePool(pool string) { - _m.Called(pool) +// RemovePool provides a mock function with given fields: poolKey +func (_m *Session) RemovePool(poolKey string) { + _m.Called(poolKey) } // Session_RemovePool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemovePool' @@ -319,12 +319,12 @@ type Session_RemovePool_Call struct { } // RemovePool is a helper method to define mock.On call -// - pool string -func (_e *Session_Expecter) RemovePool(pool interface{}) *Session_RemovePool_Call { - return &Session_RemovePool_Call{Call: _e.mock.On("RemovePool", pool)} +// - poolKey string +func (_e *Session_Expecter) RemovePool(poolKey interface{}) *Session_RemovePool_Call { + return &Session_RemovePool_Call{Call: _e.mock.On("RemovePool", poolKey)} } -func (_c *Session_RemovePool_Call) Run(run func(pool string)) *Session_RemovePool_Call { +func (_c *Session_RemovePool_Call) Run(run func(poolKey string)) *Session_RemovePool_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string)) }) @@ -341,13 +341,13 @@ func (_c *Session_RemovePool_Call) RunAndReturn(run func(string)) *Session_Remov return _c } -// Reserve provides a mock function with given fields: pool, id, ifName, meta, address -func (_m *Session) Reserve(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error { - ret := _m.Called(pool, id, ifName, meta, address) +// Reserve provides a mock function with given fields: poolKey, id, 
ifName, meta, address +func (_m *Session) Reserve(poolKey string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error { + ret := _m.Called(poolKey, id, ifName, meta, address) var r0 error if rf, ok := ret.Get(0).(func(string, string, string, types.ReservationMetadata, net.IP) error); ok { - r0 = rf(pool, id, ifName, meta, address) + r0 = rf(poolKey, id, ifName, meta, address) } else { r0 = ret.Error(0) } @@ -361,16 +361,16 @@ type Session_Reserve_Call struct { } // Reserve is a helper method to define mock.On call -// - pool string +// - poolKey string // - id string // - ifName string // - meta types.ReservationMetadata // - address net.IP -func (_e *Session_Expecter) Reserve(pool interface{}, id interface{}, ifName interface{}, meta interface{}, address interface{}) *Session_Reserve_Call { - return &Session_Reserve_Call{Call: _e.mock.On("Reserve", pool, id, ifName, meta, address)} +func (_e *Session_Expecter) Reserve(poolKey interface{}, id interface{}, ifName interface{}, meta interface{}, address interface{}) *Session_Reserve_Call { + return &Session_Reserve_Call{Call: _e.mock.On("Reserve", poolKey, id, ifName, meta, address)} } -func (_c *Session_Reserve_Call) Run(run func(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP)) *Session_Reserve_Call { +func (_c *Session_Reserve_Call) Run(run func(poolKey string, id string, ifName string, meta types.ReservationMetadata, address net.IP)) *Session_Reserve_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string), args[1].(string), args[2].(string), args[3].(types.ReservationMetadata), args[4].(net.IP)) }) @@ -387,9 +387,9 @@ func (_c *Session_Reserve_Call) RunAndReturn(run func(string, string, string, ty return _c } -// SetLastReservedIP provides a mock function with given fields: pool, ip -func (_m *Session) SetLastReservedIP(pool string, ip net.IP) { - _m.Called(pool, ip) +// SetLastReservedIP provides a mock function with given fields: 
poolKey, ip +func (_m *Session) SetLastReservedIP(poolKey string, ip net.IP) { + _m.Called(poolKey, ip) } // Session_SetLastReservedIP_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetLastReservedIP' @@ -398,13 +398,13 @@ type Session_SetLastReservedIP_Call struct { } // SetLastReservedIP is a helper method to define mock.On call -// - pool string +// - poolKey string // - ip net.IP -func (_e *Session_Expecter) SetLastReservedIP(pool interface{}, ip interface{}) *Session_SetLastReservedIP_Call { - return &Session_SetLastReservedIP_Call{Call: _e.mock.On("SetLastReservedIP", pool, ip)} +func (_e *Session_Expecter) SetLastReservedIP(poolKey interface{}, ip interface{}) *Session_SetLastReservedIP_Call { + return &Session_SetLastReservedIP_Call{Call: _e.mock.On("SetLastReservedIP", poolKey, ip)} } -func (_c *Session_SetLastReservedIP_Call) Run(run func(pool string, ip net.IP)) *Session_SetLastReservedIP_Call { +func (_c *Session_SetLastReservedIP_Call) Run(run func(poolKey string, ip net.IP)) *Session_SetLastReservedIP_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string), args[1].(net.IP)) }) diff --git a/pkg/ipam-node/store/store.go b/pkg/ipam-node/store/store.go index 0c53577..c1d8f3b 100644 --- a/pkg/ipam-node/store/store.go +++ b/pkg/ipam-node/store/store.go @@ -55,22 +55,22 @@ type Store interface { type Session interface { // Reserve reserves IP for the id and interface name, // returns error if allocation failed - Reserve(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error + Reserve(poolKey string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error // ListReservations list all reservations in the pool - ListReservations(pool string) []types.Reservation + ListReservations(poolKey string) []types.Reservation // ListPools return list with names of all known pools ListPools() []string // RemovePool removes information about the pool from 
the store - RemovePool(pool string) + RemovePool(poolKey string) // GetLastReservedIP returns last reserved IP for the pool or nil - GetLastReservedIP(pool string) net.IP + GetLastReservedIP(poolKey string) net.IP // SetLastReservedIP set last reserved IP fot the pool - SetLastReservedIP(pool string, ip net.IP) + SetLastReservedIP(poolKey string, ip net.IP) // ReleaseReservationByID releases reservation by id and interface name - ReleaseReservationByID(pool string, id string, ifName string) + ReleaseReservationByID(poolKey string, id string, ifName string) // GetReservationByID returns existing reservation for id and interface name, // return nil if allocation not found - GetReservationByID(pool string, id string, ifName string) *types.Reservation + GetReservationByID(poolKey string, id string, ifName string) *types.Reservation // Commit writes persistedData to the disk and release the lock. // the store can't be used after this call Commit() error @@ -192,10 +192,11 @@ func (s *session) checkClosed() { } // Reserve is the Session interface implementation for session -func (s *session) Reserve(pool string, id string, ifName string, meta types.ReservationMetadata, address net.IP) error { +func (s *session) Reserve(poolKey string, id string, ifName string, + meta types.ReservationMetadata, address net.IP) error { s.checkClosed() reservationKey := s.getKey(id, ifName) - poolData := s.getPoolData(pool, s.tmpData) + poolData := s.getPoolData(poolKey, s.tmpData) _, exist := poolData.Entries[reservationKey] if exist { return ErrReservationAlreadyExist @@ -221,26 +222,26 @@ func (s *session) Reserve(pool string, id string, ifName string, meta types.Rese } reservation.Metadata.CreateTime = time.Now().Format(time.RFC3339Nano) poolData.Entries[reservationKey] = reservation - s.tmpData.Pools[pool] = *poolData + s.tmpData.Pools[poolKey] = *poolData s.isModified = true return nil } -func (s *session) getPoolData(pool string, layout *types.Root) *types.PoolReservations { - res, 
exist := layout.Pools[pool] +func (s *session) getPoolData(poolKey string, layout *types.Root) *types.PoolReservations { + res, exist := layout.Pools[poolKey] if exist { if res.Entries == nil { res.Entries = map[string]types.Reservation{} } return &res } - return types.NewPoolReservations(pool) + return types.NewPoolReservations(poolKey) } // ListReservations is the Session interface implementation for session -func (s *session) ListReservations(pool string) []types.Reservation { +func (s *session) ListReservations(poolKey string) []types.Reservation { s.checkClosed() - poolData := s.getPoolData(pool, s.tmpData) + poolData := s.getPoolData(poolKey, s.tmpData) allocations := make([]types.Reservation, 0, len(poolData.Entries)) for _, a := range poolData.Entries { allocations = append(allocations, a) @@ -259,41 +260,41 @@ func (s *session) ListPools() []string { } // RemovePool is the Session interface implementation for session -func (s *session) RemovePool(pool string) { +func (s *session) RemovePool(poolKey string) { s.checkClosed() - delete(s.tmpData.Pools, pool) + delete(s.tmpData.Pools, poolKey) s.isModified = true } // GetLastReservedIP is the Session interface implementation for session -func (s *session) GetLastReservedIP(pool string) net.IP { +func (s *session) GetLastReservedIP(poolKey string) net.IP { s.checkClosed() - poolData := s.getPoolData(pool, s.tmpData) + poolData := s.getPoolData(poolKey, s.tmpData) return poolData.LastReservedIP } // SetLastReservedIP is the Session interface implementation for session -func (s *session) SetLastReservedIP(pool string, ip net.IP) { +func (s *session) SetLastReservedIP(poolKey string, ip net.IP) { s.checkClosed() - poolData := s.getPoolData(pool, s.tmpData) + poolData := s.getPoolData(poolKey, s.tmpData) poolData.LastReservedIP = ip - s.tmpData.Pools[pool] = *poolData + s.tmpData.Pools[poolKey] = *poolData s.isModified = true } // ReleaseReservationByID is the Session interface implementation for session -func (s 
*session) ReleaseReservationByID(pool string, id string, ifName string) { +func (s *session) ReleaseReservationByID(poolKey string, id string, ifName string) { s.checkClosed() - poolData := s.getPoolData(pool, s.tmpData) + poolData := s.getPoolData(poolKey, s.tmpData) delete(poolData.Entries, s.getKey(id, ifName)) - s.tmpData.Pools[pool] = *poolData + s.tmpData.Pools[poolKey] = *poolData s.isModified = true } // GetReservationByID is the Session interface implementation for session -func (s *session) GetReservationByID(pool string, id string, ifName string) *types.Reservation { +func (s *session) GetReservationByID(poolKey string, id string, ifName string) *types.Reservation { s.checkClosed() - poolData := s.getPoolData(pool, s.tmpData) + poolData := s.getPoolData(poolKey, s.tmpData) reservation, exist := poolData.Entries[s.getKey(id, ifName)] if !exist { return nil diff --git a/pkg/ipam-node/store/store_test.go b/pkg/ipam-node/store/store_test.go index 160e56f..23a79c0 100644 --- a/pkg/ipam-node/store/store_test.go +++ b/pkg/ipam-node/store/store_test.go @@ -30,7 +30,7 @@ import ( ) const ( - testPoolName = "pool1" + testPoolKey = "pool1" testContainerID = "id1" testNetIfName = "net0" testPodUUID = "a9516e9d-6f45-4693-b299-cc3d2f83e26a" @@ -42,7 +42,7 @@ const ( ) func createTestReservation(s storePkg.Session) { - ExpectWithOffset(1, s.Reserve(testPoolName, testContainerID, testNetIfName, types.ReservationMetadata{ + ExpectWithOffset(1, s.Reserve(testPoolKey, testContainerID, testNetIfName, types.ReservationMetadata{ CreateTime: time.Now().Format(time.RFC3339Nano), PodUUID: testPodUUID, PodName: testPodName, @@ -73,30 +73,30 @@ var _ = Describe("Store", func() { createTestReservation(s) By("Check reservation exist") - res := s.GetReservationByID(testPoolName, testContainerID, testNetIfName) + res := s.GetReservationByID(testPoolKey, testContainerID, testNetIfName) Expect(res).NotTo(BeNil()) Expect(res.ContainerID).To(Equal(testContainerID)) - resList := 
s.ListReservations(testPoolName) + resList := s.ListReservations(testPoolKey) Expect(resList).To(HaveLen(1)) Expect(resList[0].ContainerID).To(Equal(testContainerID)) pools := s.ListPools() - Expect(pools).To(Equal([]string{testPoolName})) + Expect(pools).To(Equal([]string{testPoolKey})) By("Check last reserved IP") - Expect(s.GetLastReservedIP(testPoolName)).To(Equal(net.ParseIP(testIP))) + Expect(s.GetLastReservedIP(testPoolKey)).To(Equal(net.ParseIP(testIP))) By("Set last reserved IP") newLastReservedIP := net.ParseIP("192.168.1.200") - s.SetLastReservedIP(testPoolName, newLastReservedIP) - Expect(s.GetLastReservedIP(testPoolName)).To(Equal(newLastReservedIP)) + s.SetLastReservedIP(testPoolKey, newLastReservedIP) + Expect(s.GetLastReservedIP(testPoolKey)).To(Equal(newLastReservedIP)) By("Release reservation") - s.ReleaseReservationByID(testPoolName, testContainerID, testNetIfName) + s.ReleaseReservationByID(testPoolKey, testContainerID, testNetIfName) By("Check reservation removed") - Expect(s.GetReservationByID(testPoolName, testContainerID, testNetIfName)).To(BeNil()) + Expect(s.GetReservationByID(testPoolKey, testContainerID, testNetIfName)).To(BeNil()) By("Commit changes") Expect(s.Commit()).NotTo(HaveOccurred()) @@ -109,7 +109,7 @@ var _ = Describe("Store", func() { s, err = store.Open(context.Background()) Expect(err).NotTo(HaveOccurred()) - res := s.GetReservationByID(testPoolName, testContainerID, testNetIfName) + res := s.GetReservationByID(testPoolKey, testContainerID, testNetIfName) Expect(res).NotTo(BeNil()) Expect(res.ContainerID).To(Equal(testContainerID)) }) @@ -121,15 +121,15 @@ var _ = Describe("Store", func() { s, err = store.Open(context.Background()) Expect(err).NotTo(HaveOccurred()) - Expect(s.GetReservationByID(testPoolName, testContainerID, testNetIfName)).To(BeNil()) + Expect(s.GetReservationByID(testPoolKey, testContainerID, testNetIfName)).To(BeNil()) }) It("Closed session should panic", func() { s, err := 
store.Open(context.Background()) Expect(err).NotTo(HaveOccurred()) s.Cancel() - Expect(func() { s.GetReservationByID(testPoolName, testContainerID, testNetIfName) }).To(Panic()) - Expect(func() { s.ListReservations(testPoolName) }).To(Panic()) - Expect(func() { s.GetLastReservedIP(testPoolName) }).To(Panic()) + Expect(func() { s.GetReservationByID(testPoolKey, testContainerID, testNetIfName) }).To(Panic()) + Expect(func() { s.ListReservations(testPoolKey) }).To(Panic()) + Expect(func() { s.GetLastReservedIP(testPoolKey) }).To(Panic()) }) It("Reload data from the disk", func() { s, err := store.Open(context.Background()) @@ -140,7 +140,7 @@ var _ = Describe("Store", func() { store2 := storePkg.New(storePath) s, err = store2.Open(context.Background()) Expect(err).NotTo(HaveOccurred()) - Expect(s.GetReservationByID(testPoolName, testContainerID, testNetIfName)).NotTo(BeNil()) + Expect(s.GetReservationByID(testPoolKey, testContainerID, testNetIfName)).NotTo(BeNil()) }) It("Concurrent access", func() { done := make(chan interface{}) @@ -164,7 +164,7 @@ var _ = Describe("Store", func() { s2, err := store.Open(context.Background()) Expect(err).NotTo(HaveOccurred()) ch <- 2 - Expect(s2.GetReservationByID(testPoolName, testContainerID, testNetIfName)).NotTo(BeNil()) + Expect(s2.GetReservationByID(testPoolKey, testContainerID, testNetIfName)).NotTo(BeNil()) s2.Cancel() }() wg.Wait() @@ -219,7 +219,7 @@ var _ = Describe("Store", func() { Expect(err).NotTo(HaveOccurred()) createTestReservation(s) Expect( - s.Reserve(testPoolName, testContainerID, testNetIfName, + s.Reserve(testPoolKey, testContainerID, testNetIfName, types.ReservationMetadata{}, net.ParseIP(testIP2))).To(MatchError(storePkg.ErrReservationAlreadyExist)) }) It("Duplicate IP allocation", func() { @@ -227,15 +227,15 @@ var _ = Describe("Store", func() { Expect(err).NotTo(HaveOccurred()) createTestReservation(s) Expect( - s.Reserve(testPoolName, "other", testNetIfName, + s.Reserve(testPoolKey, "other", 
testNetIfName, types.ReservationMetadata{}, net.ParseIP(testIP))).To(MatchError(storePkg.ErrIPAlreadyReserved)) }) It("Remove pool data", func() { s, err := store.Open(context.Background()) Expect(err).NotTo(HaveOccurred()) createTestReservation(s) - Expect(s.ListReservations(testPoolName)).NotTo(BeEmpty()) - s.RemovePool(testPoolName) - Expect(s.ListReservations(testPoolName)).To(BeEmpty()) + Expect(s.ListReservations(testPoolKey)).NotTo(BeEmpty()) + s.RemovePool(testPoolKey) + Expect(s.ListReservations(testPoolKey)).To(BeEmpty()) }) }) diff --git a/pkg/pool/annotations.go b/pkg/pool/annotations.go index e8f6e47..ccfd948 100644 --- a/pkg/pool/annotations.go +++ b/pkg/pool/annotations.go @@ -21,7 +21,7 @@ import ( ) // SetIPBlockAnnotation serialize IP pools settings for the node and add this info as annotation -func SetIPBlockAnnotation(node *v1.Node, pools map[string]*IPPool) error { +func SetIPBlockAnnotation(node *v1.Node, pools map[string]*Pool) error { annotations := node.GetAnnotations() if annotations == nil { annotations = map[string]string{} diff --git a/pkg/pool/annotations_test.go b/pkg/pool/annotations_test.go index c1b8297..7438528 100644 --- a/pkg/pool/annotations_test.go +++ b/pkg/pool/annotations_test.go @@ -26,15 +26,15 @@ import ( var _ = Describe("annotations tests", func() { Context("SetIPBlockAnnotation", func() { - testPools := make(map[string]*pool.IPPool) - testPools["my-pool-1"] = &pool.IPPool{ + testPools := make(map[string]*pool.Pool) + testPools["my-pool-1"] = &pool.Pool{ Name: "my-pool-1", Subnet: "192.168.0.0/16", StartIP: "192.168.0.2", EndIP: "192.168.0.254", Gateway: "192.168.0.1", } - testPools["my-pool-2"] = &pool.IPPool{ + testPools["my-pool-2"] = &pool.Pool{ Name: "my-pool-2", Subnet: "10.100.0.0/16", StartIP: "10.100.0.2", diff --git a/pkg/pool/manager.go b/pkg/pool/manager.go index 995d13c..163af81 100644 --- a/pkg/pool/manager.go +++ b/pkg/pool/manager.go @@ -21,45 +21,45 @@ import "sync" type Manager interface { 
ConfigReader // Update Pool's config from IPPool CR - UpdatePool(pool *IPPool) - // Remove Pool's config - RemovePool(poolName string) + UpdatePool(key string, pool *Pool) + // Remove Pool's config by key + RemovePool(key string) } // NewManager create and initialize new manager instance func NewManager() Manager { return &manager{ - poolByName: make(map[string]*IPPool), + poolByKey: make(map[string]*Pool), } } type manager struct { - lock sync.Mutex - poolByName map[string]*IPPool + lock sync.Mutex + poolByKey map[string]*Pool } -func (m *manager) UpdatePool(pool *IPPool) { +func (m *manager) UpdatePool(key string, pool *Pool) { m.lock.Lock() defer m.lock.Unlock() - m.poolByName[pool.Name] = pool + m.poolByKey[key] = pool } -func (m *manager) RemovePool(poolName string) { +func (m *manager) RemovePool(key string) { m.lock.Lock() defer m.lock.Unlock() - delete(m.poolByName, poolName) + delete(m.poolByKey, key) } -// GetPoolByName is the Manager interface implementation for the manager -func (m *manager) GetPoolByName(name string) *IPPool { +// GetPoolByKey is the Manager interface implementation for the manager +func (m *manager) GetPoolByKey(key string) *Pool { m.lock.Lock() defer m.lock.Unlock() - return m.poolByName[name] + return m.poolByKey[key] } // GetPools is the Manager interface implementation for the manager -func (m *manager) GetPools() map[string]*IPPool { +func (m *manager) GetPools() map[string]*Pool { m.lock.Lock() defer m.lock.Unlock() - return m.poolByName + return m.poolByKey } diff --git a/pkg/pool/manager_test.go b/pkg/pool/manager_test.go index af5e7b7..9ea59e8 100644 --- a/pkg/pool/manager_test.go +++ b/pkg/pool/manager_test.go @@ -22,20 +22,20 @@ import ( var _ = Describe("Manager", func() { It("Update pool data", func() { - testPoolName := "my-pool-1" - testPool := &pool.IPPool{ - Name: testPoolName, + testPoolKey := "my-pool-1" + testPool := &pool.Pool{ + Name: testPoolKey, Subnet: "192.168.0.0/16", StartIP: "192.168.0.2", EndIP: 
"192.168.0.254", Gateway: "192.168.0.1", } mgr := pool.NewManager() - Expect(mgr.GetPoolByName(testPoolName)).To(BeNil()) - mgr.UpdatePool(testPool) - Expect(mgr.GetPoolByName(testPoolName)).NotTo(BeNil()) + Expect(mgr.GetPoolByKey(testPoolKey)).To(BeNil()) + mgr.UpdatePool(testPoolKey, testPool) + Expect(mgr.GetPoolByKey(testPoolKey)).NotTo(BeNil()) Expect(mgr.GetPools()).To(HaveLen(1)) - mgr.RemovePool(testPoolName) - Expect(mgr.GetPoolByName(testPoolName)).To(BeNil()) + mgr.RemovePool(testPoolKey) + Expect(mgr.GetPoolByKey(testPoolKey)).To(BeNil()) }) }) diff --git a/pkg/pool/mocks/Manager.go b/pkg/pool/mocks/Manager.go index ba7ca01..d8e4738 100644 --- a/pkg/pool/mocks/Manager.go +++ b/pkg/pool/mocks/Manager.go @@ -5,8 +5,6 @@ package mocks import ( pool "github.com/Mellanox/nvidia-k8s-ipam/pkg/pool" mock "github.com/stretchr/testify/mock" - - v1 "k8s.io/api/core/v1" ) // Manager is an autogenerated mock type for the Manager type @@ -22,60 +20,60 @@ func (_m *Manager) EXPECT() *Manager_Expecter { return &Manager_Expecter{mock: &_m.Mock} } -// GetPoolByName provides a mock function with given fields: name -func (_m *Manager) GetPoolByName(name string) *pool.IPPool { - ret := _m.Called(name) +// GetPoolByKey provides a mock function with given fields: key +func (_m *Manager) GetPoolByKey(key string) *pool.Pool { + ret := _m.Called(key) - var r0 *pool.IPPool - if rf, ok := ret.Get(0).(func(string) *pool.IPPool); ok { - r0 = rf(name) + var r0 *pool.Pool + if rf, ok := ret.Get(0).(func(string) *pool.Pool); ok { + r0 = rf(key) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*pool.IPPool) + r0 = ret.Get(0).(*pool.Pool) } } return r0 } -// Manager_GetPoolByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPoolByName' -type Manager_GetPoolByName_Call struct { +// Manager_GetPoolByKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPoolByKey' +type 
Manager_GetPoolByKey_Call struct { *mock.Call } -// GetPoolByName is a helper method to define mock.On call -// - name string -func (_e *Manager_Expecter) GetPoolByName(name interface{}) *Manager_GetPoolByName_Call { - return &Manager_GetPoolByName_Call{Call: _e.mock.On("GetPoolByName", name)} +// GetPoolByKey is a helper method to define mock.On call +// - key string +func (_e *Manager_Expecter) GetPoolByKey(key interface{}) *Manager_GetPoolByKey_Call { + return &Manager_GetPoolByKey_Call{Call: _e.mock.On("GetPoolByKey", key)} } -func (_c *Manager_GetPoolByName_Call) Run(run func(name string)) *Manager_GetPoolByName_Call { +func (_c *Manager_GetPoolByKey_Call) Run(run func(key string)) *Manager_GetPoolByKey_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(string)) }) return _c } -func (_c *Manager_GetPoolByName_Call) Return(_a0 *pool.IPPool) *Manager_GetPoolByName_Call { +func (_c *Manager_GetPoolByKey_Call) Return(_a0 *pool.Pool) *Manager_GetPoolByKey_Call { _c.Call.Return(_a0) return _c } -func (_c *Manager_GetPoolByName_Call) RunAndReturn(run func(string) *pool.IPPool) *Manager_GetPoolByName_Call { +func (_c *Manager_GetPoolByKey_Call) RunAndReturn(run func(string) *pool.Pool) *Manager_GetPoolByKey_Call { _c.Call.Return(run) return _c } // GetPools provides a mock function with given fields: -func (_m *Manager) GetPools() map[string]*pool.IPPool { +func (_m *Manager) GetPools() map[string]*pool.Pool { ret := _m.Called() - var r0 map[string]*pool.IPPool - if rf, ok := ret.Get(0).(func() map[string]*pool.IPPool); ok { + var r0 map[string]*pool.Pool + if rf, ok := ret.Get(0).(func() map[string]*pool.Pool); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]*pool.IPPool) + r0 = ret.Get(0).(map[string]*pool.Pool) } } @@ -99,86 +97,79 @@ func (_c *Manager_GetPools_Call) Run(run func()) *Manager_GetPools_Call { return _c } -func (_c *Manager_GetPools_Call) Return(_a0 map[string]*pool.IPPool) *Manager_GetPools_Call { +func (_c 
*Manager_GetPools_Call) Return(_a0 map[string]*pool.Pool) *Manager_GetPools_Call { _c.Call.Return(_a0) return _c } -func (_c *Manager_GetPools_Call) RunAndReturn(run func() map[string]*pool.IPPool) *Manager_GetPools_Call { +func (_c *Manager_GetPools_Call) RunAndReturn(run func() map[string]*pool.Pool) *Manager_GetPools_Call { _c.Call.Return(run) return _c } -// Reset provides a mock function with given fields: -func (_m *Manager) Reset() { - _m.Called() +// RemovePool provides a mock function with given fields: key +func (_m *Manager) RemovePool(key string) { + _m.Called(key) } -// Manager_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' -type Manager_Reset_Call struct { +// Manager_RemovePool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemovePool' +type Manager_RemovePool_Call struct { *mock.Call } -// Reset is a helper method to define mock.On call -func (_e *Manager_Expecter) Reset() *Manager_Reset_Call { - return &Manager_Reset_Call{Call: _e.mock.On("Reset")} +// RemovePool is a helper method to define mock.On call +// - key string +func (_e *Manager_Expecter) RemovePool(key interface{}) *Manager_RemovePool_Call { + return &Manager_RemovePool_Call{Call: _e.mock.On("RemovePool", key)} } -func (_c *Manager_Reset_Call) Run(run func()) *Manager_Reset_Call { +func (_c *Manager_RemovePool_Call) Run(run func(key string)) *Manager_RemovePool_Call { _c.Call.Run(func(args mock.Arguments) { - run() + run(args[0].(string)) }) return _c } -func (_c *Manager_Reset_Call) Return() *Manager_Reset_Call { +func (_c *Manager_RemovePool_Call) Return() *Manager_RemovePool_Call { _c.Call.Return() return _c } -func (_c *Manager_Reset_Call) RunAndReturn(run func()) *Manager_Reset_Call { +func (_c *Manager_RemovePool_Call) RunAndReturn(run func(string)) *Manager_RemovePool_Call { _c.Call.Return(run) return _c } -// Update provides a mock function with given fields: node -func (_m 
*Manager) Update(node *v1.Node) error { - ret := _m.Called(node) - - var r0 error - if rf, ok := ret.Get(0).(func(*v1.Node) error); ok { - r0 = rf(node) - } else { - r0 = ret.Error(0) - } - - return r0 +// UpdatePool provides a mock function with given fields: key, _a1 +func (_m *Manager) UpdatePool(key string, _a1 *pool.Pool) { + _m.Called(key, _a1) } -// Manager_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' -type Manager_Update_Call struct { +// Manager_UpdatePool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdatePool' +type Manager_UpdatePool_Call struct { *mock.Call } -// Update is a helper method to define mock.On call -// - node *v1.Node -func (_e *Manager_Expecter) Update(node interface{}) *Manager_Update_Call { - return &Manager_Update_Call{Call: _e.mock.On("Update", node)} +// UpdatePool is a helper method to define mock.On call +// - key string +// - _a1 *pool.Pool +func (_e *Manager_Expecter) UpdatePool(key interface{}, _a1 interface{}) *Manager_UpdatePool_Call { + return &Manager_UpdatePool_Call{Call: _e.mock.On("UpdatePool", key, _a1)} } -func (_c *Manager_Update_Call) Run(run func(node *v1.Node)) *Manager_Update_Call { +func (_c *Manager_UpdatePool_Call) Run(run func(key string, _a1 *pool.Pool)) *Manager_UpdatePool_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*v1.Node)) + run(args[0].(string), args[1].(*pool.Pool)) }) return _c } -func (_c *Manager_Update_Call) Return(_a0 error) *Manager_Update_Call { - _c.Call.Return(_a0) +func (_c *Manager_UpdatePool_Call) Return() *Manager_UpdatePool_Call { + _c.Call.Return() return _c } -func (_c *Manager_Update_Call) RunAndReturn(run func(*v1.Node) error) *Manager_Update_Call { +func (_c *Manager_UpdatePool_Call) RunAndReturn(run func(string, *pool.Pool)) *Manager_UpdatePool_Call { _c.Call.Return(run) return _c } diff --git a/pkg/pool/reader.go b/pkg/pool/reader.go index 79dec63..c888b1c 
100644 --- a/pkg/pool/reader.go +++ b/pkg/pool/reader.go @@ -24,17 +24,24 @@ const ( IPBlocksAnnotation = "ipam.nvidia.com/ip-blocks" ) -// IPPool represents a block of IPs from a given Subnet -type IPPool struct { - Name string `json:"-"` - Subnet string `json:"subnet"` +// Pool represents generic pool configuration +type Pool struct { + Name string `json:"-"` + Subnet string `json:"subnet"` + StartIP string `json:"startIP"` + EndIP string `json:"endIP"` + Gateway string `json:"gateway"` + Exclusions []ExclusionRange `json:"exclusions"` +} + +// ExclusionRange contains range of IP to exclude from the allocation +type ExclusionRange struct { StartIP string `json:"startIP"` EndIP string `json:"endIP"` - Gateway string `json:"gateway"` } // String return string representation of the IPPool config -func (p *IPPool) String() string { +func (p *Pool) String() string { //nolint:errchkjson data, _ := json.Marshal(p) return string(data) @@ -42,14 +49,14 @@ func (p *IPPool) String() string { // ConfigReader is an interface to which provides access to the pool configuration type ConfigReader interface { - // GetPoolByName returns IPPool for the provided pool name or nil if pool doesnt exist - GetPoolByName(name string) *IPPool + // GetPoolByKey returns Pool for the provided pool key or nil if pool doesn't exist + GetPoolByKey(key string) *Pool + // GetPools returns map with information about all pools - GetPools() map[string]*IPPool + GetPools() map[string]*Pool } type configReader struct { - poolByName map[string]*IPPool + poolByKey map[string]*Pool } func NewConfigReader(node *v1.Node) (ConfigReader, error) { @@ -62,27 +69,27 @@ func NewConfigReader(node *v1.Node) (ConfigReader, error) { return nil, 
fmt.Errorf("failed to parse %s annotation content. %w", IPBlocksAnnotation, err) } - for poolName, pool := range poolByName { + for poolName, pool := range poolByKey { pool.Name = poolName } return &configReader{ - poolByName: poolByName, + poolByKey: poolByKey, }, nil } -// GetPoolByName implements ConfigReader interface -func (r *configReader) GetPoolByName(name string) *IPPool { - return r.poolByName[name] +// GetPoolByKey implements ConfigReader interface +func (r *configReader) GetPoolByKey(key string) *Pool { + return r.poolByKey[key] } // GetPools implements ConfigReader interface -func (r *configReader) GetPools() map[string]*IPPool { - return r.poolByName +func (r *configReader) GetPools() map[string]*Pool { + return r.poolByKey } diff --git a/pkg/pool/reader_test.go b/pkg/pool/reader_test.go index ee7b947..0759827 100644 --- a/pkg/pool/reader_test.go +++ b/pkg/pool/reader_test.go @@ -62,7 +62,7 @@ var _ = Describe("pool tests", func() { }) }) - Context("GetPoolByName()", func() { + Context("GetPoolByKey()", func() { var r pool.ConfigReader BeforeEach(func() { @@ -79,12 +79,12 @@ var _ = Describe("pool tests", func() { }) It("returns nil if pool does not exist", func() { - p := r.GetPoolByName("non-existent-pool") + p := r.GetPoolByKey("non-existent-key") Expect(p).To(BeNil()) }) It("returns pool if exists", func() { - p := r.GetPoolByName("my-pool") + p := r.GetPoolByKey("my-pool") Expect(p).ToNot(BeNil()) Expect(p.Subnet).To(Equal("192.168.0.0/16")) })