Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add multi-az test based on ClusterClass #3397

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions test/e2e/data/e2e_conf.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,8 @@ providers:
- sourcePath: "./infrastructure-aws/generated/cluster-template-peered-remote.yaml"
- sourcePath: "./infrastructure-aws/generated/cluster-template-internal-elb.yaml"
- sourcePath: "./infrastructure-aws/kustomize_sources/topology/clusterclass-quick-start.yaml"
- sourcePath: "./infrastructure-aws/generated/cluster-template-multi-az-clusterclass.yaml"
- sourcePath: "./infrastructure-aws/kustomize_sources/multi-az-clusterclass/clusterclass-multi-az.yaml"
- sourcePath: "./shared/v1beta1_provider/metadata.yaml"
- sourcePath: "./infrastructure-aws/generated/cluster-template-ignition.yaml"
replacements:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
---
# Workload Cluster for the "multi-az-clusterclass" e2e flavor.
# ${...} tokens are substituted by clusterctl before the manifest is applied.
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    # Matched by the ClusterResourceSet below so the CNI gets installed.
    cni: ${CLUSTER_NAME}-crs-0
  name: "${CLUSTER_NAME}"
spec:
  clusterNetwork:
    pods:
      cidrBlocks: ["192.168.0.0/16"]
  topology:
    # Must match the ClusterClass metadata.name in clusterclass-multi-az.yaml.
    class: "multi-az"
    version: "${KUBERNETES_VERSION}"
    controlPlane:
      # replicas is an integer field; the substitution must stay unquoted,
      # otherwise the rendered value is a string and fails API-server schema
      # validation.
      replicas: ${CONTROL_PLANE_MACHINE_COUNT}
    workers:
      machineDeployments:
        - class: "default-worker"
          name: "md-0"
          replicas: ${WORKER_MACHINE_COUNT}
    variables:
      - name: region
        value: "${AWS_REGION}"
      - name: sshKeyName
        value: "${AWS_SSH_KEY_NAME}"
      - name: controlPlaneMachineType
        value: "${AWS_CONTROL_PLANE_MACHINE_TYPE}"
      - name: workerMachineType
        value: "${AWS_NODE_MACHINE_TYPE}"
      # Two subnets per availability zone: one private (default) and one
      # public, consumed by the awsClusterTemplateGeneral patch.
      - name: networkSubnets
        value:
          - availabilityZone: "${AWS_AVAILABILITY_ZONE_1}"
            cidrBlock: "10.0.0.0/24"
          - availabilityZone: "${AWS_AVAILABILITY_ZONE_1}"
            cidrBlock: "10.0.1.0/24"
            isPublic: true
          - availabilityZone: "${AWS_AVAILABILITY_ZONE_2}"
            cidrBlock: "10.0.2.0/24"
          - availabilityZone: "${AWS_AVAILABILITY_ZONE_2}"
            cidrBlock: "10.0.3.0/24"
            isPublic: true
---
# CNI manifests; ${CNI_RESOURCES} is injected by the e2e framework.
apiVersion: v1
data: ${CNI_RESOURCES}
kind: ConfigMap
metadata:
  name: cni-${CLUSTER_NAME}-crs-0
---
# Applies the CNI ConfigMap above to any cluster carrying the matching
# "cni" label (set on the Cluster at the top of this file).
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: ${CLUSTER_NAME}-crs-0
spec:
  clusterSelector:
    matchLabels:
      cni: ${CLUSTER_NAME}-crs-0
  resources:
    - kind: ConfigMap
      name: cni-${CLUSTER_NAME}-crs-0
  strategy: ApplyOnce
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
# ClusterClass "multi-az": blueprint for clusters created by the
# multi-az-clusterclass e2e flavor. The patches below inject region, SSH key,
# instance types and the subnet layout supplied through topology variables.
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
  name: multi-az
spec:
  controlPlane:
    ref:
      apiVersion: controlplane.cluster.x-k8s.io/v1beta1
      kind: KubeadmControlPlaneTemplate
      name: multi-az-control-plane
    # Infra template stamped out for each control-plane machine.
    machineInfrastructure:
      ref:
        kind: AWSMachineTemplate
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        name: multi-az-control-plane
  infrastructure:
    ref:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: AWSClusterTemplate
      name: multi-az
  workers:
    machineDeployments:
      # Referenced as "default-worker" from the Cluster topology.
      - class: default-worker
        template:
          bootstrap:
            ref:
              apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
              kind: KubeadmConfigTemplate
              name: multi-az-worker-bootstraptemplate
          infrastructure:
            ref:
              apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
              kind: AWSMachineTemplate
              name: multi-az-worker-machinetemplate
  # Variables settable per-Cluster via .spec.topology.variables.
  variables:
    - name: region
      required: true
      schema:
        openAPIV3Schema:
          type: string
          default: us-east-1
    - name: sshKeyName
      required: true
      schema:
        openAPIV3Schema:
          type: string
          default: default
    - name: controlPlaneMachineType
      required: true
      schema:
        openAPIV3Schema:
          type: string
          default: t3.large
    - name: workerMachineType
      required: true
      schema:
        openAPIV3Schema:
          type: string
          default: t3.large
    # Optional subnet layout; schema mirrors AWSCluster .spec.network.subnets.
    - name: networkSubnets
      required: false
      schema:
        openAPIV3Schema:
          type: array
          items:
            type: object
            properties:
              id:
                type: string
              cidrBlock:
                type: string
              availabilityZone:
                type: string
              isPublic:
                type: boolean
              routeTableID:
                type: string
              natGatewayID:
                type: string
              tags:
                type: object
  patches:
    # Fills the (intentionally empty) AWSClusterTemplate.
    - name: awsClusterTemplateGeneral
      definitions:
        - selector:
            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
            kind: AWSClusterTemplate
            matchResources:
              infrastructureCluster: true
          jsonPatches:
            - op: add
              path: "/spec/template/spec/region"
              valueFrom:
                variable: region
            - op: add
              path: /spec/template/spec/sshKeyName
              valueFrom:
                variable: sshKeyName
            # NOTE(review): networkSubnets is optional (required: false, no
            # default) but this patch is applied unconditionally — confirm
            # reconciliation tolerates the variable being unset, or guard the
            # patch with enabledIf.
            - op: add
              path: /spec/template/spec/network/subnets
              valueFrom:
                variable: networkSubnets
    # Sets instance type and SSH key on control-plane machines.
    - name: awsMachineTemplateControlPlane
      definitions:
        - selector:
            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
            kind: AWSMachineTemplate
            matchResources:
              controlPlane: true
          jsonPatches:
            # "replace" because the template carries an instanceType
            # placeholder (REPLACEME) to satisfy the required field.
            - op: replace
              path: "/spec/template/spec/instanceType"
              valueFrom:
                variable: controlPlaneMachineType
            - op: add
              path: /spec/template/spec/sshKeyName
              valueFrom:
                variable: sshKeyName
    # Sets instance type and SSH key on default-worker machines.
    - name: awsMachineTemplateWorker
      definitions:
        - selector:
            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
            kind: AWSMachineTemplate
            matchResources:
              machineDeploymentClass:
                names:
                  - default-worker
          jsonPatches:
            - op: replace
              path: "/spec/template/spec/instanceType"
              valueFrom:
                variable: workerMachineType
            - op: add
              path: /spec/template/spec/sshKeyName
              valueFrom:
                variable: sshKeyName
---
# Infrastructure template for the cluster. Intentionally empty: region,
# sshKeyName and network subnets are injected by the ClusterClass patches.
# NOTE(review): the awsClusterTemplateGeneral patch "add"s into
# /spec/template/spec/network/subnets while spec is {} — confirm the parent
# "network" object exists (e.g. via defaulting) when the patch is applied.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSClusterTemplate
metadata:
  name: multi-az
spec:
  template:
    spec: {}
---
# Control-plane template for the multi-az ClusterClass.
# apiVersion is listed before kind for consistency with every other document
# in this file.
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlaneTemplate
metadata:
  name: multi-az-control-plane
spec:
  template:
    spec:
      kubeadmConfigSpec:
        clusterConfiguration:
          apiServer:
            extraArgs:
              # In-tree AWS cloud provider on the API server.
              cloud-provider: aws
          controllerManager:
            extraArgs:
              cloud-provider: aws
        initConfiguration:
          nodeRegistration:
            # Node name comes from instance metadata so the AWS cloud
            # provider can match the node to its EC2 instance.
            name: '{{ ds.meta_data.local_hostname }}'
            kubeletExtraArgs:
              cloud-provider: aws
        joinConfiguration:
          nodeRegistration:
            name: '{{ ds.meta_data.local_hostname }}'
            kubeletExtraArgs:
              cloud-provider: aws
---
# Machine template for control-plane nodes. instanceType is patched in by the
# awsMachineTemplateControlPlane patch (op: replace).
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachineTemplate
metadata:
  name: multi-az-control-plane
spec:
  template:
    spec:
      # instanceType is a required field (OpenAPI schema); REPLACEME is a
      # placeholder that is always replaced by the ClusterClass patch.
      instanceType: REPLACEME
      iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io"
---
# Machine template for default-worker nodes. instanceType is patched in by
# the awsMachineTemplateWorker patch (op: replace).
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: AWSMachineTemplate
metadata:
  name: multi-az-worker-machinetemplate
spec:
  template:
    spec:
      # instanceType is a required field (OpenAPI schema); REPLACEME is a
      # placeholder that is always replaced by the ClusterClass patch.
      instanceType: REPLACEME
      iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io"
---
# Bootstrap template for default-worker nodes: joins the cluster with the
# in-tree AWS cloud provider enabled on the kubelet.
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: "multi-az-worker-bootstraptemplate"
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          # Node name from instance metadata, matching the control-plane
          # template's nodeRegistration settings.
          name: '{{ ds.meta_data.local_hostname }}'
          kubeletExtraArgs:
            cloud-provider: aws
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Kustomization for the multi-az-clusterclass flavor; builds the generated
# cluster template from cluster-template.yaml in this directory.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - cluster-template.yaml
1 change: 1 addition & 0 deletions test/e2e/shared/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ const (
AwsAvailabilityZone1 = "AWS_AVAILABILITY_ZONE_1"
AwsAvailabilityZone2 = "AWS_AVAILABILITY_ZONE_2"
MultiAzFlavor = "multi-az"
MultiAzClusterClassFlavor = "multi-az-clusterclass"
LimitAzFlavor = "limit-az"
SpotInstancesFlavor = "spot-instances"
SSMFlavor = "ssm"
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
//go:build e2e
// +build e2e

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package unmanaged

import (
"context"
"fmt"
"os"

"github.com/gofrs/flock"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
. "github.com/onsi/gomega"
"k8s.io/utils/pointer"

"sigs.k8s.io/cluster-api-provider-aws/test/e2e/shared"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)

// Functional e2e spec for the ClusterClass-based multi-az flavor: creates a
// workload cluster from the "multi-az-clusterclass" flavor, then adds two
// extra MachineDeployments pinned to specific AZs/subnets and waits for
// their nodes to appear.
var _ = ginkgo.Context("[unmanaged] [functional] [ClusterClass]", func() {
	var (
		ctx               context.Context
		result            *clusterctl.ApplyClusterTemplateAndWaitResult
		requiredResources *shared.TestResource
	)

	ginkgo.BeforeEach(func() {
		ctx = context.TODO()
		result = &clusterctl.ApplyClusterTemplateAndWaitResult{}
	})

	ginkgo.Describe("Workload cluster in multiple AZs [ClusterClass]", func() {
		ginkgo.It("It should be creatable and deletable", func() {
			specName := "functional-test-multi-az-clusterclass"
			// Reserve AWS resource quota for this spec (file-lock guarded so
			// parallel ginkgo nodes don't oversubscribe); released on exit.
			requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
			requiredResources.WriteRequestedResources(e2eCtx, specName)
			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
			ginkgo.By("Creating a cluster")
			clusterName := fmt.Sprintf("cluster-%s", util.RandomString(6))
			configCluster := defaultConfigCluster(clusterName, namespace.Name)
			// Three control-plane replicas, matching the EC2Normal/EIP quota
			// reserved above.
			configCluster.ControlPlaneMachineCount = pointer.Int64Ptr(3)
			configCluster.Flavor = shared.MultiAzClusterClassFlavor
			cluster, _, _ := createCluster(ctx, configCluster, result)

			ginkgo.By("Adding worker nodes to additional subnets")
			mdName1 := clusterName + "-md-1"
			mdName2 := clusterName + "-md-2"
			md1 := makeMachineDeployment(namespace.Name, mdName1, clusterName, 1)
			md2 := makeMachineDeployment(namespace.Name, mdName2, clusterName, 1)
			az1 := os.Getenv(shared.AwsAvailabilityZone1)
			az2 := os.Getenv(shared.AwsAvailabilityZone2)

			// Private CIDRs (10.0.0.0/24, 10.0.2.0/24) set via the
			// networkSubnets variable in the clusterclass flavor template.
			// NOTE(review): original comment referenced
			// cluster-template-multi-az.yaml, but this spec deploys the
			// multi-az-clusterclass flavor — confirm intended template.
			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
				MachineDeployment:       md1,
				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName1),
				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, mdName1, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), pointer.StringPtr(az1), getSubnetID("cidr-block", "10.0.0.0/24", clusterName)),
			})
			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
				MachineDeployment:       md2,
				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName2),
				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, mdName2, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), pointer.StringPtr(az2), getSubnetID("cidr-block", "10.0.2.0/24", clusterName)),
			})

			ginkgo.By("Waiting for new worker nodes to become ready")
			k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
			framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md1}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
			framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md2}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
		})
	})
})