🌱 test/e2e/in-memory: enable unit tests #8886

Merged · 1 commit · Jun 20, 2023
4 changes: 4 additions & 0 deletions scripts/ci-test.sh
@@ -31,6 +31,10 @@ echo -e "\n*** Testing Cluster API Provider Docker ***\n"
# Docker provider
make test-docker-infrastructure-junit

echo -e "\n*** Testing Cluster API Provider In-Memory ***\n"
# In-memory provider
make test-in-memory-infrastructure-junit

echo -e "\n*** Testing Cluster API Runtime SDK test extension ***\n"
# Test Extension
make test-test-extension-junit
@@ -314,8 +314,14 @@ func TestReconcileNormalEtcd(t *testing.T) {
manager := cmanager.New(scheme)

host := "127.0.0.1"
wcmux := server.NewWorkloadClustersMux(manager, host)
_, err := wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String())
wcmux, err := server.NewWorkloadClustersMux(manager, host, server.CustomPorts{
// NOTE: make sure to use ports different than other tests, so we can run tests in parallel
MinPort: server.DefaultMinPort + 1000,
MaxPort: server.DefaultMinPort + 1099,
DebugPort: server.DefaultDebugPort,
})
g.Expect(err).ToNot(HaveOccurred())
_, err = wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String())
g.Expect(err).ToNot(HaveOccurred())

r := InMemoryMachineReconciler{
@@ -436,8 +442,14 @@ func TestReconcileNormalApiServer(t *testing.T) {
manager := cmanager.New(scheme)

host := "127.0.0.1"
wcmux := server.NewWorkloadClustersMux(manager, host)
_, err := wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String())
wcmux, err := server.NewWorkloadClustersMux(manager, host, server.CustomPorts{
// NOTE: make sure to use ports different than other tests, so we can run tests in parallel
MinPort: server.DefaultMinPort + 1100,
MaxPort: server.DefaultMinPort + 1299,
DebugPort: server.DefaultDebugPort,
})
g.Expect(err).ToNot(HaveOccurred())
_, err = wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String())
g.Expect(err).ToNot(HaveOccurred())

r := InMemoryMachineReconciler{
65 changes: 56 additions & 9 deletions test/infrastructure/inmemory/internal/server/mux.go
@@ -44,15 +44,52 @@ import (
)

const (
debugPort = 19000
// DefaultDebugPort default debug port of the workload clusters mux.
DefaultDebugPort = 19000

// This range allows for 4k clusters, which is 4 times the goal we have in mind for
// the first iteration of stress tests.

minPort = 20000
maxPort = 24000
// DefaultMinPort default min port of the workload clusters mux.
DefaultMinPort = 20000
// DefaultMaxPort default max port of the workload clusters mux.
DefaultMaxPort = 24000
)

// WorkloadClustersMuxOption defines an option for the WorkloadClustersMux creation.
type WorkloadClustersMuxOption interface {
Apply(*WorkloadClustersMuxOptions)
}

// WorkloadClustersMuxOptions are options for the workload clusters mux.
type WorkloadClustersMuxOptions struct {
MinPort int
MaxPort int
DebugPort int
}

// ApplyOptions applies WorkloadClustersMuxOption to the current WorkloadClustersMuxOptions.
func (o *WorkloadClustersMuxOptions) ApplyOptions(opts []WorkloadClustersMuxOption) *WorkloadClustersMuxOptions {
for _, opt := range opts {
opt.Apply(o)
}
return o
}

// CustomPorts allows customizing the ports used by the workload clusters mux.
type CustomPorts struct {
MinPort int
MaxPort int
DebugPort int
}

// Apply applies this configuration to the given WorkloadClustersMuxOptions.
func (c CustomPorts) Apply(options *WorkloadClustersMuxOptions) {
options.MinPort = c.MinPort
options.MaxPort = c.MaxPort
options.DebugPort = c.DebugPort
}

// WorkloadClustersMux implements a server that handles requests for multiple workload clusters.
// Each workload cluster will get its own listener, serving on a dedicated port, e.g.
// wkl-cluster-1 >> :20000, wkl-cluster-2 >> :20001 etc.
@@ -77,12 +114,19 @@ type WorkloadClustersMux struct {
}

// NewWorkloadClustersMux returns a WorkloadClustersMux that handles requests for multiple workload clusters.
func NewWorkloadClustersMux(manager cmanager.Manager, host string) *WorkloadClustersMux {
func NewWorkloadClustersMux(manager cmanager.Manager, host string, opts ...WorkloadClustersMuxOption) (*WorkloadClustersMux, error) {
options := WorkloadClustersMuxOptions{
MinPort: DefaultMinPort,
MaxPort: DefaultMaxPort,
DebugPort: DefaultDebugPort,
}
options.ApplyOptions(opts)

m := &WorkloadClustersMux{
host: host,
minPort: minPort,
maxPort: maxPort,
portIndex: minPort,
minPort: options.MinPort,
maxPort: options.MaxPort,
portIndex: options.MinPort,
manager: manager,
workloadClusterListeners: map[string]*WorkloadClusterListener{},
workloadClusterNameByHost: map[string]string{},
@@ -107,10 +151,13 @@ func NewWorkloadClustersMux(manager cmanager.Manager, host string) *WorkloadClus
m.debugServer = http.Server{
Handler: api.NewDebugHandler(manager, m.log, m),
}
l, _ := net.Listen("tcp", net.JoinHostPort(host, fmt.Sprintf("%d", debugPort)))
l, err := net.Listen("tcp", net.JoinHostPort(host, fmt.Sprintf("%d", options.DebugPort)))
if err != nil {
return nil, errors.Wrapf(err, "failed to create listener for workload cluster mux")
}
go func() { _ = m.debugServer.Serve(l) }()

return m
return m, nil
}

// mixedHandler returns a handler that can serve either API server calls or etcd calls.
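The new variadic options parameter keeps the constructor backwards compatible: called with no options it behaves exactly as before, while CustomPorts lets callers (here, the unit tests) carve out a dedicated port range. The sketch below shows both call styles; the package name, import paths, and port offsets are illustrative assumptions (the packages are internal, so this would only compile from within the provider module), while the identifiers come from mux.go above.

// Illustrative sketch only: package name and import paths are assumptions; the
// identifiers (NewWorkloadClustersMux, CustomPorts, Default*Port) come from mux.go.
package example

import (
	cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager"
	"sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server"
)

// newDefaultMux relies on the defaults: workload cluster listeners are allocated
// from the 20000-24000 range and the debug server binds to 19000, as before this PR.
func newDefaultMux(manager cmanager.Manager, host string) (*server.WorkloadClustersMux, error) {
	return server.NewWorkloadClustersMux(manager, host)
}

// newMuxWithCustomPorts reserves a dedicated slice of ports, mirroring what the
// unit tests in this PR do so that parallel tests never fight over the same port.
// The +2000/+2099 and +10 offsets are purely illustrative.
func newMuxWithCustomPorts(manager cmanager.Manager, host string) (*server.WorkloadClustersMux, error) {
	return server.NewWorkloadClustersMux(manager, host, server.CustomPorts{
		MinPort:   server.DefaultMinPort + 2000,
		MaxPort:   server.DefaultMinPort + 2099,
		DebugPort: server.DefaultDebugPort + 10,
	})
}

Because CustomPorts satisfies WorkloadClustersMuxOption through its Apply method, further options can be added later without changing the constructor signature again; main.go below keeps calling the constructor without options and therefore keeps the previous defaults.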
39 changes: 33 additions & 6 deletions test/infrastructure/inmemory/internal/server/mux_test.go
@@ -60,13 +60,20 @@ func init() {
}

func TestMux(t *testing.T) {
t.Parallel()
g := NewWithT(t)

manager := cmanager.New(scheme)

wcl := "workload-cluster"
host := "127.0.0.1" //nolint:goconst
wcmux := NewWorkloadClustersMux(manager, host)
wcmux, err := NewWorkloadClustersMux(manager, host, CustomPorts{
// NOTE: make sure to use ports different than other tests, so we can run tests in parallel
MinPort: DefaultMinPort,
MaxPort: DefaultMinPort + 99,
DebugPort: DefaultDebugPort,
})
g.Expect(err).ToNot(HaveOccurred())

listener, err := wcmux.InitWorkloadClusterListener(wcl)
g.Expect(err).ToNot(HaveOccurred())
@@ -115,9 +122,15 @@ func TestMux(t *testing.T) {
}

func TestAPI_corev1_CRUD(t *testing.T) {
t.Parallel()
g := NewWithT(t)

wcmux, c := setupWorkloadClusterListener(g)
wcmux, c := setupWorkloadClusterListener(g, CustomPorts{
// NOTE: make sure to use ports different than other tests, so we can run tests in parallel
MinPort: DefaultMinPort + 100,
MaxPort: DefaultMinPort + 199,
DebugPort: DefaultDebugPort + 1,
})

// create

@@ -171,9 +184,15 @@ func TestAPI_corev1_CRUD(t *testing.T) {
}

func TestAPI_rbacv1_CRUD(t *testing.T) {
t.Parallel()
g := NewWithT(t)

wcmux, c := setupWorkloadClusterListener(g)
wcmux, c := setupWorkloadClusterListener(g, CustomPorts{
// NOTE: make sure to use ports different than other tests, so we can run tests in parallel
MinPort: DefaultMinPort + 200,
MaxPort: DefaultMinPort + 299,
DebugPort: DefaultDebugPort + 2,
})

// create

@@ -214,12 +233,19 @@ func TestAPI_rbacv1_CRUD(t *testing.T) {
}

func TestAPI_PortForward(t *testing.T) {
t.Parallel()
g := NewWithT(t)
manager := cmanager.New(scheme)

// TODO: deduplicate this setup code with the test above
host := "127.0.0.1"
wcmux := NewWorkloadClustersMux(manager, host)
wcmux, err := NewWorkloadClustersMux(manager, host, CustomPorts{
// NOTE: make sure to use ports different than other tests, so we can run tests in parallel
MinPort: DefaultMinPort + 300,
MaxPort: DefaultMinPort + 399,
DebugPort: DefaultDebugPort + 3,
})
g.Expect(err).ToNot(HaveOccurred())

// InfraCluster controller >> when "creating the load balancer"
wcl1 := "workload-cluster1"
@@ -341,11 +367,12 @@ func TestAPI_PortForward(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())
}

func setupWorkloadClusterListener(g Gomega) (*WorkloadClustersMux, client.Client) {
func setupWorkloadClusterListener(g Gomega, ports CustomPorts) (*WorkloadClustersMux, client.Client) {
manager := cmanager.New(scheme)

host := "127.0.0.1"
wcmux := NewWorkloadClustersMux(manager, host)
wcmux, err := NewWorkloadClustersMux(manager, host, ports)
g.Expect(err).ToNot(HaveOccurred())

// InfraCluster controller >> when "creating the load balancer"
wcl1 := "workload-cluster1"
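The port assignments in this file follow a simple convention: each parallel test reserves its own 100-port slice above DefaultMinPort (+0-99, +100-199, +200-299, +300-399) plus a unique debug port, so listeners never collide once t.Parallel() runs the tests concurrently. A hypothetical sketch of how a future test in this package could extend the pattern (the test name and the +400 slice are purely illustrative, not part of this PR):

// Hypothetical example, not part of this PR: a new parallel test claims the next
// unused slice of ports so it cannot clash with the tests above.
func TestAPI_example_CRUD(t *testing.T) {
	t.Parallel()
	g := NewWithT(t)

	wcmux, c := setupWorkloadClusterListener(g, CustomPorts{
		// NOTE: use ports different from the other tests, so tests can run in parallel.
		MinPort:   DefaultMinPort + 400,
		MaxPort:   DefaultMinPort + 499,
		DebugPort: DefaultDebugPort + 4,
	})

	_ = wcmux // shut down / assert against the mux as the existing tests do.
	_ = c     // exercise the fake API server through the client, as in the CRUD tests above.
}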
6 changes: 5 additions & 1 deletion test/infrastructure/inmemory/main.go
@@ -257,7 +257,11 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {

// Start an http server
podIP := os.Getenv("POD_IP")
apiServerMux := server.NewWorkloadClustersMux(cloudMgr, podIP)
apiServerMux, err := server.NewWorkloadClustersMux(cloudMgr, podIP)
if err != nil {
setupLog.Error(err, "unable to create workload clusters mux")
os.Exit(1)
}

// Setup reconcilers
if err := (&controllers.InMemoryClusterReconciler{