Merge pull request #11 from ykulazhenkov/controller-fixes
Fix known issues in IPAM controller implementation and add tests
adrianchiris committed May 8, 2023
2 parents 75b9721 + afa3254 commit e4a3f7b
Showing 15 changed files with 861 additions and 117 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -19,7 +19,7 @@ DOCKERFILE ?= Dockerfile
DOCKER_CMD ?= docker

# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION = 1.26.4
+ENVTEST_K8S_VERSION = 1.27.1

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
17 changes: 8 additions & 9 deletions cmd/ipam-controller/app/app.go
@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
@@ -62,7 +63,11 @@ func NewControllerCommand() *cobra.Command {
if err := opts.Validate(); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
-return RunController(logr.NewContext(ctx, klog.NewKlogr()), opts)
+conf, err := ctrl.GetConfig()
+if err != nil {
+return fmt.Errorf("failed to read config for k8s client: %v", err)
+}
+return RunController(logr.NewContext(ctx, klog.NewKlogr()), conf, opts)
},
Args: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
@@ -88,7 +93,7 @@
}

// RunController starts the IPAM controller with the provided options
-func RunController(ctx context.Context, opts *options.Options) error {
+func RunController(ctx context.Context, config *rest.Config, opts *options.Options) error {
logger := logr.FromContextOrDiscard(ctx)
ctrl.SetLogger(logger)

@@ -103,13 +108,7 @@ func RunController(ctx context.Context, opts *options.Options) error {
return err
}

-conf, err := ctrl.GetConfig()
-if err != nil {
-logger.Error(err, "failed to read config for k8s client")
-return err
-}
-
-mgr, err := ctrl.NewManager(conf, ctrl.Options{
+mgr, err := ctrl.NewManager(config, ctrl.Options{
Scheme: scheme,
NewCache: cache.BuilderWithOptions(cache.Options{
SelectorsByObject: cache.SelectorsByObject{&corev1.ConfigMap{}: cache.ObjectSelector{
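The net effect of the app.go change is that RunController no longer loads the kubeconfig itself: the caller builds the *rest.Config and injects it, which is what lets the envtest-based suite added below run the controller against a test API server. A minimal usage sketch of the new signature, assembled from the diff above (an illustrative external caller, not part of the commit):

// The caller obtains a *rest.Config (here via controller-runtime's helper,
// as cmd/ipam-controller/app/app.go now does) and injects it into RunController.
conf, err := ctrl.GetConfig()
if err != nil {
	return fmt.Errorf("failed to read config for k8s client: %v", err)
}
return app.RunController(logr.NewContext(ctx, klog.NewKlogr()), conf, opts)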
93 changes: 93 additions & 0 deletions cmd/ipam-controller/app/app_suite_test.go
@@ -0,0 +1,93 @@
/*
Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
property and proprietary rights in and to this material, related
documentation and any modifications thereto. Any use, reproduction,
disclosure or distribution of this material and related documentation
without an express license agreement from NVIDIA CORPORATION or
its affiliates is strictly prohibited.
SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES
SPDX-License-Identifier: LicenseRef-NvidiaProprietary
*/

package app_test

import (
"context"
"sync"
"testing"

"github.com/go-logr/logr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"

"github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-controller/app"
"github.com/Mellanox/nvidia-k8s-ipam/cmd/ipam-controller/app/options"
)

const (
TestNamespace = "test-ns"
TestConfigMapName = "test-config"
)

var (
cfg *rest.Config
k8sClient client.Client
testEnv *envtest.Environment
cFunc context.CancelFunc
ctx context.Context
wg sync.WaitGroup
)

func TestApp(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "IPAM Controller Suite")
}

var _ = BeforeSuite(func() {
By("bootstrapping test environment")
testEnv = &envtest.Environment{}

ctx, cFunc = context.WithCancel(context.Background())

var err error
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())

k8sClient, err = client.New(cfg, client.Options{})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())

Expect(k8sClient.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: TestNamespace}})).To(BeNil())

wg.Add(1)
go func() {
err := app.RunController(logr.NewContext(ctx, klog.NewKlogr()), cfg, &options.Options{
ConfigMapName: TestConfigMapName,
ConfigMapNamespace: TestNamespace,
})
if err != nil {
panic(err.Error())
}
wg.Done()
}()
})

var _ = AfterSuite(func() {
By("stop controller")
cFunc()
wg.Wait()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
181 changes: 181 additions & 0 deletions cmd/ipam-controller/app/app_test.go
@@ -0,0 +1,181 @@
package app_test

import (
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

"github.com/Mellanox/nvidia-k8s-ipam/pkg/ipam-controller/config"
"github.com/Mellanox/nvidia-k8s-ipam/pkg/pool"
)

func updateConfigMap(data string) {
d := map[string]string{config.ConfigMapKey: data}
err := k8sClient.Create(ctx, &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: TestConfigMapName, Namespace: TestNamespace},
Data: d,
})
if err == nil {
return
}
if apiErrors.IsAlreadyExists(err) {
configMap := &corev1.ConfigMap{}
Expect(k8sClient.Get(
ctx, types.NamespacedName{Name: TestConfigMapName, Namespace: TestNamespace}, configMap)).NotTo(HaveOccurred())
configMap.Data = d
Expect(k8sClient.Update(
ctx, configMap)).NotTo(HaveOccurred())
} else {
Expect(err).NotTo(HaveOccurred())
}
}

var validConfig = `
{
"pools": {
"pool1": { "subnet": "192.168.0.0/16", "perNodeBlockSize": 10 , "gateway": "192.168.0.1"},
"pool2": { "subnet": "172.16.0.0/16", "perNodeBlockSize": 50 , "gateway": "172.16.0.1"}
}
}
`

// only enough ranges for two nodes can be allocated from this pool
var validConfig2 = `
{
"pools": {
"pool3": { "subnet": "172.17.0.0/24", "perNodeBlockSize": 100 , "gateway": "172.17.0.1"}
},
"nodeSelector": {"foo": "bar"}
}
`

var invalidConfig = `{{{`

func createNode(name string) *corev1.Node {
node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}
Expect(k8sClient.Create(ctx, node)).NotTo(HaveOccurred())
return node
}

func getNode(name string) *corev1.Node {
node := &corev1.Node{}
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: name}, node)).NotTo(HaveOccurred())
return node
}

func updateNode(node *corev1.Node) *corev1.Node {
Expect(k8sClient.Update(ctx, node)).NotTo(HaveOccurred())
return node
}

func getRangeFromNode(nodeName string) map[string]*pool.IPPool {
node := getNode(nodeName)
mgr, err := pool.NewManagerImpl(node)
if err != nil {
return nil
}
return mgr.GetPools()
}

// WaitAndCheckForStability waits for the condition to succeed, then checks that it remains stable for the stability interval
func WaitAndCheckForStability(check func(g Gomega), wait interface{}, stability interface{}) {
Eventually(func(g Gomega) { check(g) }, wait).Should(Succeed())
Consistently(func(g Gomega) { check(g) }, stability).Should(Succeed())
}

var _ = Describe("App", func() {
It("Basic tests", func() {

By("Create config and wait for controller to set ranges")

updateConfigMap(validConfig)
testNode1 := "node1"
testNode2 := "node2"
testNode3 := "node3"

pools := []string{"pool1", "pool2"}

createNode(testNode1)
createNode(testNode2)
createNode(testNode3)

WaitAndCheckForStability(func(g Gomega) {
uniqStartEndIPs := map[string]struct{}{}
node1Ranges := getRangeFromNode(testNode1)
node2Ranges := getRangeFromNode(testNode2)
node3Ranges := getRangeFromNode(testNode3)
for _, r := range []map[string]*pool.IPPool{node1Ranges, node2Ranges, node3Ranges} {
g.Expect(r).To(HaveLen(len(pools)))
for _, p := range pools {
if r[p] != nil {
uniqStartEndIPs[r[p].StartIP] = struct{}{}
uniqStartEndIPs[r[p].EndIP] = struct{}{}
g.Expect(r[p].Gateway).NotTo(BeEmpty())
g.Expect(r[p].Subnet).NotTo(BeEmpty())
}
}
}
// we should have unique start/end IPs for each node for each pool
g.Expect(uniqStartEndIPs).To(HaveLen(len(pools) * 3 * 2))
}, 15, 2)

By("Set invalid config")
updateConfigMap(invalidConfig)
time.Sleep(time.Second)

By("Set valid config, which ignores all nodes")

updateConfigMap(validConfig2)

WaitAndCheckForStability(func(g Gomega) {
g.Expect(getRangeFromNode(testNode1)).To(BeNil())
g.Expect(getRangeFromNode(testNode2)).To(BeNil())
g.Expect(getRangeFromNode(testNode3)).To(BeNil())
}, 15, 2)

By("Update node to match selector")

// node1 should have range
node1 := getNode(testNode1)
node1.Labels = map[string]string{"foo": "bar"}
updateNode(node1)
WaitAndCheckForStability(func(g Gomega) {
g.Expect(getRangeFromNode(testNode1)).NotTo(BeNil())
}, 15, 2)

// node2 should have range
node2 := getNode(testNode2)
node2.Labels = map[string]string{"foo": "bar"}
var node2Ranges map[string]*pool.IPPool
updateNode(node2)
WaitAndCheckForStability(func(g Gomega) {
node2Ranges = getRangeFromNode(testNode2)
g.Expect(node2Ranges).NotTo(BeNil())
}, 15, 2)

// node3 should have no range, because no free ranges available
node3 := getNode(testNode3)
node3.Labels = map[string]string{"foo": "bar"}
updateNode(node3)
WaitAndCheckForStability(func(g Gomega) {
g.Expect(getRangeFromNode(testNode3)).To(BeNil())
}, 15, 5)

// remove label from node2, node3 should have a range now
node2 = getNode(testNode2)
node2.Labels = nil
updateNode(node2)
WaitAndCheckForStability(func(g Gomega) {
node3Ranges := getRangeFromNode(testNode3)
g.Expect(node3Ranges).NotTo(BeNil())
// should reuse ranges from node2
g.Expect(node3Ranges).To(Equal(node2Ranges))
}, 15, 2)
})
})