Add informer sync test to prepare command
dtomcej authored Feb 12, 2020
1 parent ccc118f commit 36966ac
Showing 7 changed files with 114 additions and 7 deletions.
2 changes: 2 additions & 0 deletions cmd/configuration.go
@@ -36,6 +36,7 @@ type PrepareConfig struct {
	Debug         bool   `description:"Debug mode" export:"true"`
	Namespace     string `description:"The namespace that maesh is installed in." export:"true"`
	ClusterDomain string `description:"Your internal K8s cluster domain." export:"true"`
+	SMI           bool   `description:"Enable SMI operation" export:"true"`
}

// NewPrepareConfig creates PrepareConfig.
@@ -45,5 +46,6 @@ func NewPrepareConfig() *PrepareConfig {
		Debug:         false,
		Namespace:     "maesh",
		ClusterDomain: "cluster.local",
+		SMI:           false,
	}
}
10 changes: 7 additions & 3 deletions cmd/prepare/prepare.go
@@ -10,20 +10,20 @@ import (
	log "github.com/sirupsen/logrus"
)

-// NewCmd builds a new Patch command.
+// NewCmd builds a new Prepare command.
func NewCmd(pConfig *cmd.PrepareConfig, loaders []cli.ResourceLoader) *cli.Command {
	return &cli.Command{
		Name:          "prepare",
		Description:   `Prepare command.`,
		Configuration: pConfig,
		Run: func(_ []string) error {
-			return patchCommand(pConfig)
+			return prepareCommand(pConfig)
		},
		Resources: loaders,
	}
}

-func patchCommand(pConfig *cmd.PrepareConfig) error {
+func prepareCommand(pConfig *cmd.PrepareConfig) error {
	log.SetOutput(os.Stdout)
	log.SetLevel(log.InfoLevel)

@@ -44,6 +44,10 @@ func patchCommand(pConfig *cmd.PrepareConfig) error {
		return fmt.Errorf("error during cluster check: %v", err)
	}

+	if err = clients.CheckInformersStart(pConfig.SMI); err != nil {
+		return fmt.Errorf("error during informer check: %v, this can be caused by pre-existing objects in your cluster that do not conform to the spec", err)
+	}
+
	if err = clients.InitCluster(pConfig.Namespace, pConfig.ClusterDomain); err != nil {
		return fmt.Errorf("error initializing cluster: %v", err)
	}
@@ -111,6 +111,9 @@ spec:
{{- if .Values.controller.logging.debug }}
- "--debug"
{{- end }}
+{{- if .Values.smi.enable }}
+- "--smi"
+{{- end }}
- "--clusterdomain"
- {{ default "cluster.local" .Values.clusterDomain | quote }}
- "--namespace=$(POD_NAMESPACE)"
7 changes: 3 additions & 4 deletions integration/integration_test.go
@@ -116,17 +116,16 @@ func (s *BaseSuite) maeshPrepareWithArgs(args ...string) *exec.Cmd {

func (s *BaseSuite) startMaeshBinaryCmd(c *check.C, smi bool) *exec.Cmd {
	args := []string{}
+	if smi {
+		args = append(args, "--smi")
+	}

	cmd := s.maeshPrepareWithArgs(args...)
	cmd.Env = os.Environ()
	output, err := cmd.CombinedOutput()
	c.Log(string(output))
	c.Assert(err, checker.IsNil)

-	if smi {
-		args = []string{"--smi"}
-	}
-
	// Ignore the kube-system namespace since we don't care about system events.
	args = append(args, "--ignoreNamespaces=kube-system")

20 changes: 20 additions & 0 deletions integration/resources/smi/access-control-broken/1.tts.yaml
@@ -0,0 +1,20 @@
+---
+kind: TrafficTarget
+apiVersion: access.smi-spec.io/v1alpha1
+metadata:
+  name: a-b
+  namespace: test
+destination:
+  kind: ServiceAccount
+  name: true
+  namespace: test
+specs:
+- kind: HTTPRouteGroup
+  name: app-routes
+  namespace: 0
+  matches:
+  - foo
+sources:
+- kind: ServiceAccount
+  name: a
+  namespace: false
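
This fixture is deliberately ill-typed: `name: true` is a boolean and the `namespace` values under `specs` and `sources` are an integer and a boolean, where the SMI TrafficTarget type expects strings. The typed access client therefore cannot decode the object, the TrafficTarget informer never syncs, and the new prepare check fails — which is the failure mode the error message in prepare.go describes. A minimal, self-contained sketch of that decode error (not part of the commit; it assumes the smi-sdk-go access/v1alpha1 API types and sigs.k8s.io/yaml are available under these import paths):

package main

import (
	"fmt"

	accessv1alpha1 "github.com/deislabs/smi-sdk-go/pkg/apis/access/v1alpha1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Trimmed copy of the fixture above: `name: true` is a bool where the
	// TrafficTarget destination expects a string.
	broken := []byte(`
kind: TrafficTarget
apiVersion: access.smi-spec.io/v1alpha1
metadata:
  name: a-b
  namespace: test
destination:
  kind: ServiceAccount
  name: true
  namespace: test
`)

	var tt accessv1alpha1.TrafficTarget
	// sigs.k8s.io/yaml converts the YAML to JSON and then unmarshals it into
	// the typed struct, so the bool-in-a-string-field produces an error here,
	// mirroring the kind of decode error the typed clientset hits when the
	// informer lists such objects from the cluster.
	if err := yaml.Unmarshal(broken, &tt); err != nil {
		fmt.Println("decode failed as expected:", err)
	}
}
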
14 changes: 14 additions & 0 deletions integration/smi_test.go
@@ -2,6 +2,7 @@ package integration

import (
	"fmt"
+	"os"

	"github.com/containous/traefik/v2/pkg/config/dynamic"
	"github.com/go-check/check"
@@ -45,6 +46,19 @@ func (s *SMISuite) TestSMIAccessControl(c *check.C) {
	s.checkTCPServiceServerURLs(c, config)
}

+func (s *SMISuite) TestSMIAccessControlPrepareFail(c *check.C) {
+	s.createResources(c, "resources/smi/access-control-broken/")
+	defer s.deleteResources(c, "resources/smi/access-control-broken/", false)
+
+	args := []string{"--smi"}
+	cmd := s.maeshPrepareWithArgs(args...)
+	cmd.Env = os.Environ()
+	output, err := cmd.CombinedOutput()
+
+	c.Log(string(output))
+	c.Assert(err, checker.NotNil)
+}
+
func (s *SMISuite) TestSMITrafficSplit(c *check.C) {
	s.createResources(c, "resources/smi/traffic-split/")
	defer s.deleteResources(c, "resources/smi/traffic-split/", true)
65 changes: 65 additions & 0 deletions internal/k8s/client.go
@@ -1,6 +1,7 @@
package k8s

import (
"context"
"encoding/json"
"errors"
"fmt"
@@ -12,16 +13,21 @@ import (
	"github.com/containous/traefik/v2/pkg/safe"

	smiAccessClientset "github.com/deislabs/smi-sdk-go/pkg/gen/client/access/clientset/versioned"
+	accessInformer "github.com/deislabs/smi-sdk-go/pkg/gen/client/access/informers/externalversions"
	smiSpecsClientset "github.com/deislabs/smi-sdk-go/pkg/gen/client/specs/clientset/versioned"
+	specsInformer "github.com/deislabs/smi-sdk-go/pkg/gen/client/specs/informers/externalversions"
	smiSplitClientset "github.com/deislabs/smi-sdk-go/pkg/gen/client/split/clientset/versioned"
+	splitInformer "github.com/deislabs/smi-sdk-go/pkg/gen/client/split/informers/externalversions"
	"github.com/google/uuid"
	log "github.com/sirupsen/logrus"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	kubeerror "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

@@ -157,6 +163,65 @@ func (w *ClientWrapper) KubeDNSMatch() (bool, error) {
	return true, nil
}

+// CheckInformersStart checks if the required informers can start and sync in a reasonable time.
+func (w *ClientWrapper) CheckInformersStart(smi bool) error {
+	log.Debug("Creating and Starting Informers")
+
+	stopCh := make(chan struct{})
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	// Create a new SharedInformerFactory, and register the event handler to informers.
+	kubeFactory := informers.NewSharedInformerFactoryWithOptions(w.KubeClient, ResyncPeriod)
+	kubeFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+	kubeFactory.Core().V1().Endpoints().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+	kubeFactory.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+	kubeFactory.Start(stopCh)
+
+	for t, ok := range kubeFactory.WaitForCacheSync(ctx.Done()) {
+		if !ok {
+			return fmt.Errorf("timed out waiting for controller caches to sync: %s", t.String())
+		}
+	}
+
+	if smi {
+		// Create new SharedInformerFactories, and register the event handler to informers.
+		accessFactory := accessInformer.NewSharedInformerFactoryWithOptions(w.SmiAccessClient, ResyncPeriod)
+		accessFactory.Access().V1alpha1().TrafficTargets().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+		accessFactory.Start(stopCh)
+
+		for t, ok := range accessFactory.WaitForCacheSync(ctx.Done()) {
+			if !ok {
+				return fmt.Errorf("timed out waiting for controller caches to sync: %s", t.String())
+			}
+		}
+
+		specsFactory := specsInformer.NewSharedInformerFactoryWithOptions(w.SmiSpecsClient, ResyncPeriod)
+		specsFactory.Specs().V1alpha1().HTTPRouteGroups().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+		specsFactory.Specs().V1alpha1().TCPRoutes().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+		specsFactory.Start(stopCh)
+
+		for t, ok := range specsFactory.WaitForCacheSync(ctx.Done()) {
+			if !ok {
+				return fmt.Errorf("timed out waiting for controller caches to sync: %s", t.String())
+			}
+		}
+
+		splitFactory := splitInformer.NewSharedInformerFactoryWithOptions(w.SmiSplitClient, ResyncPeriod)
+		splitFactory.Split().V1alpha2().TrafficSplits().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{})
+		splitFactory.Start(stopCh)
+
+		for t, ok := range splitFactory.WaitForCacheSync(ctx.Done()) {
+			if !ok {
+				return fmt.Errorf("timed out waiting for controller caches to sync: %s", t.String())
+			}
+		}
+	}
+
+	return nil
+}
+
// isCoreDNSVersionSupported returns true if the provided string contains a supported CoreDNS version.
func isCoreDNSVersionSupported(versionLine string) bool {
	for _, v := range supportedCoreDNSVersions {
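
The kube-only path of CheckInformersStart can also be exercised without a cluster by wiring a fake clientset into the wrapper. The following is only a sketch, not part of the commit: it assumes an in-package test, and that ClientWrapper exposes its Kubernetes client through an interface-typed KubeClient field so a fake clientset can be injected (the SMI clients are never touched when the flag is false).

package k8s

import (
	"testing"

	"k8s.io/client-go/kubernetes/fake"
)

func TestCheckInformersStartKubeOnly(t *testing.T) {
	// Hypothetical fixture: a wrapper backed by a fake clientset, so the
	// Services, Endpoints and Pods informers have nothing to list and their
	// caches should sync well within the 10-second timeout.
	w := &ClientWrapper{
		KubeClient: fake.NewSimpleClientset(),
	}

	if err := w.CheckInformersStart(false); err != nil {
		t.Fatalf("expected informer caches to sync against the fake client, got: %v", err)
	}
}
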
