Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CIDRPool 3/x] update ipam-node to support CIDRPool #44

Merged
merged 6 commits into from
Jun 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
248 changes: 167 additions & 81 deletions api/grpc/nvidia/ipam/node/v1/node.pb.go

Large diffs are not rendered by default.

11 changes: 11 additions & 0 deletions api/grpc/proto/nvidia/ipam/node/v1/node.proto
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,13 @@ syntax = "proto3";

package nvidia.ipam.node.v1;

// PoolType indicates the type of the pool that is referred to by name
// in the pools field of IPAMParameters / AllocationInfo.
enum PoolType {
  // default value; the pool type was not explicitly set
  POOL_TYPE_UNSPECIFIED = 0;
  // the pool name refers to an IPPool custom resource
  POOL_TYPE_IPPOOL = 1;
  // the pool name refers to a CIDRPool custom resource
  POOL_TYPE_CIDRPOOL = 2;
}

// gRPC service definition for NVIDIA IPAM node daemon
service IPAMService {
// Allocate is called as a part of CMD_ADD flow.
Expand Down Expand Up @@ -64,6 +71,8 @@ message IPAMParameters {
string cni_ifname = 3;
// required, additional metadata to identify IP allocation
IPAMMetadata metadata = 4;
// type of the pool which is referred by the name in the pools field
PoolType pool_type = 5;
}

// IPAMMetadata contains metadata for IPAM calls
Expand Down Expand Up @@ -104,6 +113,8 @@ message AllocationInfo {
string ip = 2;
// gateway for allocated IP
string gateway = 3;
// type of the pool which is referred by the name in the pools field
PoolType pool_type = 4;
}

// IsAllocatedReply contains reply for IsAllocated rpc call
Expand Down
11 changes: 11 additions & 0 deletions cmd/ipam-node/app/app.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ import (
"github.com/Mellanox/nvidia-k8s-ipam/pkg/common"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/allocator"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/cleaner"
cidrpoolctrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/cidrpool"
ippoolctrl "github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/controllers/ippool"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/grpc/middleware"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-node/handlers"
Expand Down Expand Up @@ -157,6 +158,7 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio
k8sClient, err := client.New(config, client.Options{Scheme: mgr.GetScheme(), Mapper: mgr.GetRESTMapper()})
if err != nil {
logger.Error(err, "unable to direct k8s client")
return err
}

if err = (&ippoolctrl.IPPoolReconciler{
Expand All @@ -168,6 +170,15 @@ func RunNodeDaemon(ctx context.Context, config *rest.Config, opts *options.Optio
logger.Error(err, "unable to create controller", "controller", "IPPool")
return err
}
if err = (&cidrpoolctrl.CIDRPoolReconciler{
PoolManager: poolManager,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
NodeName: opts.NodeName,
}).SetupWithManager(mgr); err != nil {
logger.Error(err, "unable to create controller", "controller", "CIDRPool")
return err
}

if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
logger.Error(err, "unable to set up health check")
Expand Down
166 changes: 95 additions & 71 deletions cmd/ipam-node/app/app_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ import (
"google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"

nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1"
Expand All @@ -44,7 +43,7 @@ const (
testNamespace = "default"
)

func createTestPools() {
func createTestIPPools() {
pool1 := &ipamv1alpha1.IPPool{
ObjectMeta: metav1.ObjectMeta{Name: testPoolName1, Namespace: testNamespace},
Spec: ipamv1alpha1.IPPoolSpec{
Expand All @@ -54,6 +53,15 @@ func createTestPools() {
},
}
ExpectWithOffset(1, k8sClient.Create(ctx, pool1))
pool1.Status = ipamv1alpha1.IPPoolStatus{
Allocations: []ipamv1alpha1.Allocation{
{
NodeName: testNodeName,
StartIP: "192.168.0.2",
EndIP: "192.168.0.254",
},
}}
ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool1))

pool2 := &ipamv1alpha1.IPPool{
ObjectMeta: metav1.ObjectMeta{Name: testPoolName2, Namespace: testNamespace},
Expand All @@ -64,47 +72,57 @@ func createTestPools() {
},
}
ExpectWithOffset(1, k8sClient.Create(ctx, pool2))

// Update statuses with range allocation
Eventually(func(g Gomega) error {
status := ipamv1alpha1.IPPoolStatus{
Allocations: []ipamv1alpha1.Allocation{
{
NodeName: testNodeName,
StartIP: "192.168.0.2",
EndIP: "192.168.0.254",
},
},
}
return updatePoolStatus(testPoolName1, status)
}, 30, 5).Should(Not(HaveOccurred()))

Eventually(func(g Gomega) error {
status := ipamv1alpha1.IPPoolStatus{
Allocations: []ipamv1alpha1.Allocation{
{
NodeName: testNodeName,
StartIP: "10.100.0.2",
EndIP: "10.100.0.254",
},
pool2.Status = ipamv1alpha1.IPPoolStatus{
Allocations: []ipamv1alpha1.Allocation{
{
NodeName: testNodeName,
StartIP: "10.100.0.2",
EndIP: "10.100.0.254",
},
}
return updatePoolStatus(testPoolName2, status)
}, 30, 5).Should(Not(HaveOccurred()))
}}
ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool2))
}

func updatePoolStatus(poolName string, status ipamv1alpha1.IPPoolStatus) error {
pool := &ipamv1alpha1.IPPool{}
err := k8sClient.Get(ctx, types.NamespacedName{Name: poolName, Namespace: testNamespace}, pool)
if err != nil {
return err
func createTestCIDRPools() {
pool1GatewayIndex := uint(1)
pool1 := &ipamv1alpha1.CIDRPool{
ObjectMeta: metav1.ObjectMeta{Name: testPoolName1, Namespace: testNamespace},
Spec: ipamv1alpha1.CIDRPoolSpec{
CIDR: "192.100.0.0/16",
GatewayIndex: &pool1GatewayIndex,
PerNodeNetworkPrefix: 24,
Exclusions: []ipamv1alpha1.ExcludeRange{
{StartIP: "192.100.0.1", EndIP: "192.100.0.10"},
},
},
}
pool.Status = status
err = k8sClient.Status().Update(ctx, pool)
if err != nil {
return err
ExpectWithOffset(1, k8sClient.Create(ctx, pool1))
pool1.Status = ipamv1alpha1.CIDRPoolStatus{
Allocations: []ipamv1alpha1.CIDRPoolAllocation{
{
NodeName: testNodeName,
Prefix: "192.100.0.0/24",
Gateway: "192.100.0.1",
},
}}
ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool1))

pool2 := &ipamv1alpha1.CIDRPool{
ObjectMeta: metav1.ObjectMeta{Name: testPoolName2, Namespace: testNamespace},
Spec: ipamv1alpha1.CIDRPoolSpec{
CIDR: "10.200.0.0/24",
PerNodeNetworkPrefix: 31,
},
}
return nil
ExpectWithOffset(1, k8sClient.Create(ctx, pool2))
pool2.Status = ipamv1alpha1.CIDRPoolStatus{
Allocations: []ipamv1alpha1.CIDRPoolAllocation{
{
NodeName: testNodeName,
Prefix: "10.200.0.0/31",
},
}}
ExpectWithOffset(1, k8sClient.Status().Update(ctx, pool2))
}

func createTestPod() *corev1.Pod {
Expand Down Expand Up @@ -161,10 +179,13 @@ var _ = Describe("IPAM Node daemon", func() {
It("Validate main flows", func() {
done := make(chan interface{})
go func() {
defer GinkgoRecover()
defer close(done)
testDir := GinkgoT().TempDir()
opts := getOptions(testDir)

createTestPools()
createTestIPPools()
createTestCIDRPools()
pod := createTestPod()

ctx = logr.NewContext(ctx, klog.NewKlogr())
Expand All @@ -180,38 +201,41 @@ var _ = Describe("IPAM Node daemon", func() {

grpcClient := nodev1.NewIPAMServiceClient(conn)

params := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)

// no allocation yet
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())

// allocate
resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())
Expect(resp.Allocations).To(HaveLen(2))
Expect(resp.Allocations[0].Pool).NotTo(BeEmpty())
Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty())
Expect(resp.Allocations[0].Ip).NotTo(BeEmpty())

_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate should be idempotent
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// check should fail
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())
close(done)
cidrPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)
cidrPoolParams.PoolType = nodev1.PoolType_POOL_TYPE_CIDRPOOL
ipPoolParams := getValidReqParams(string(pod.UID), pod.Name, pod.Namespace)

for _, params := range []*nodev1.IPAMParameters{ipPoolParams, cidrPoolParams} {
// no allocation yet
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())

// allocate
resp, err := grpcClient.Allocate(ctx, &nodev1.AllocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())
Expect(resp.Allocations).To(HaveLen(2))
Expect(resp.Allocations[0].Pool).NotTo(BeEmpty())
Expect(resp.Allocations[0].Gateway).NotTo(BeEmpty())
Expect(resp.Allocations[0].Ip).NotTo(BeEmpty())

_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// deallocate should be idempotent
_, err = grpcClient.Deallocate(ctx, &nodev1.DeallocateRequest{Parameters: params})
Expect(err).NotTo(HaveOccurred())

// check should fail
_, err = grpcClient.IsAllocated(ctx,
&nodev1.IsAllocatedRequest{Parameters: params})
Expect(status.Code(err) == codes.NotFound).To(BeTrue())
}
}()
Eventually(done, 5*time.Minute).Should(BeClosed())
})
Expand Down
7 changes: 7 additions & 0 deletions pkg/cni/plugin/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (

nodev1 "github.com/Mellanox/nvidia-k8s-ipam/api/grpc/nvidia/ipam/node/v1"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/cni/types"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/common"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/version"
)

Expand Down Expand Up @@ -197,8 +198,14 @@ func cniConfToGRPCReq(conf *types.NetConf, args *skel.CmdArgs) (*nodev1.IPAMPara
if err != nil {
return nil, fmt.Errorf("failed to load extra CNI args: %v", err)
}

poolType := nodev1.PoolType_POOL_TYPE_IPPOOL
if conf.IPAM.PoolType == common.PoolTypeCIDRPool {
poolType = nodev1.PoolType_POOL_TYPE_CIDRPOOL
}
req := &nodev1.IPAMParameters{
Pools: conf.IPAM.Pools,
PoolType: poolType,
CniIfname: args.IfName,
CniContainerid: args.ContainerID,
Metadata: &nodev1.IPAMMetadata{
Expand Down
8 changes: 5 additions & 3 deletions pkg/cni/plugin/plugin_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ var _ = Describe("plugin tests", func() {
mockDaemonClient.On("Allocate", mock.Anything, &nodev1.AllocateRequest{
Parameters: &nodev1.IPAMParameters{
Pools: []string{"my-pool"},
PoolType: nodev1.PoolType_POOL_TYPE_IPPOOL,
CniIfname: "net1",
CniContainerid: "1234",
Metadata: &nodev1.IPAMMetadata{
Expand All @@ -92,9 +93,10 @@ var _ = Describe("plugin tests", func() {
},
}}).Return(&nodev1.AllocateResponse{
Allocations: []*nodev1.AllocationInfo{{
Pool: "my-pool",
Ip: "192.168.1.10/24",
Gateway: "192.168.1.1",
Pool: "my-pool",
Ip: "192.168.1.10/24",
Gateway: "192.168.1.1",
PoolType: nodev1.PoolType_POOL_TYPE_IPPOOL,
}},
}, nil)
err := p.CmdAdd(args)
Expand Down
16 changes: 16 additions & 0 deletions pkg/cni/types/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ import (
"strings"

"github.com/containernetworking/cni/pkg/types"

"github.com/Mellanox/nvidia-k8s-ipam/pkg/common"
)

const (
Expand Down Expand Up @@ -52,6 +54,9 @@ type IPAMConf struct {

// PoolName is the name of the pool to be used to allocate IP
PoolName string `json:"poolName,omitempty"`
// PoolType is the type of the pool which is referred by the PoolName,
// supported values: ippool, cidrpool
PoolType string `json:"poolType,omitempty"`
// Address of the NVIDIA-ipam DaemonSocket
DaemonSocket string `json:"daemonSocket,omitempty"`
DaemonCallTimeoutSeconds int `json:"daemonCallTimeoutSeconds,omitempty"`
Expand Down Expand Up @@ -106,6 +111,7 @@ func (cl *confLoader) LoadConf(bytes []byte) (*NetConf, error) {
defaultConf := &IPAMConf{
// use network name as pool name by default
PoolName: n.Name,
PoolType: common.PoolTypeIPPool,
ConfDir: DefaultConfDir,
LogFile: DefaultLogFile,
DaemonSocket: DefaultDaemonSocket,
Expand All @@ -119,6 +125,12 @@ func (cl *confLoader) LoadConf(bytes []byte) (*NetConf, error) {
return nil, err
}

n.IPAM.PoolType = strings.ToLower(n.IPAM.PoolType)
if n.IPAM.PoolType != common.PoolTypeIPPool && n.IPAM.PoolType != common.PoolTypeCIDRPool {
return nil, fmt.Errorf("unsupported poolType %s, supported values: %s, %s",
n.IPAM.PoolType, common.PoolTypeIPPool, common.PoolTypeCIDRPool)
}

return n, nil
}

Expand Down Expand Up @@ -170,6 +182,10 @@ func (cl *confLoader) overlayConf(from, to *IPAMConf) {
to.PoolName = from.PoolName
}

if to.PoolType == "" {
to.PoolType = from.PoolType
}

if to.DaemonSocket == "" {
to.DaemonSocket = from.DaemonSocket
}
Expand Down
7 changes: 7 additions & 0 deletions pkg/common/consts.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,3 +17,10 @@ const (
// IPAMName is the name of the IPAM plugin
IPAMName = "nvidia-k8s-ipam"
)

const (
	// PoolTypeIPPool is the string identifier for pools backed by the IPPool CRD.
	PoolTypeIPPool = "ippool"
	// PoolTypeCIDRPool is the string identifier for pools backed by the CIDRPool CRD.
	PoolTypeCIDRPool = "cidrpool"
)
Loading
Loading