From f42cd30c5e4491a6b7000c92c8c743ec610abf8d Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:28:39 +0300 Subject: [PATCH 01/10] Improve CRUD suite runtime~ --- .../tests/crud/creategetdelete_test.go | 2018 +++++++---------- .../tests/crud/testdata/scale-nodegroups.yaml | 4 +- .../tests/crud/testdata/taints-max-pods.yaml | 4 +- 3 files changed, 875 insertions(+), 1151 deletions(-) diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index 67a92628f3..cddd9d4fc6 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -12,21 +12,24 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go-v2/aws" - cfntypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" - awsec2 "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go-v2/service/ec2/types" - ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" harness "github.com/dlespiau/kube-test-harness" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" "github.com/pkg/errors" + "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/yaml" + + "github.com/aws/aws-sdk-go-v2/aws" + cfntypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" + awsec2 "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" . "github.com/weaveworks/eksctl/integration/matchers" "github.com/weaveworks/eksctl/integration/runner" @@ -37,7 +40,6 @@ import ( api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/iam" - iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc" "github.com/weaveworks/eksctl/pkg/testutils" "github.com/weaveworks/eksctl/pkg/utils/file" ) @@ -50,8 +52,7 @@ func init() { if err := api.Register(); err != nil { panic(errors.Wrap(err, "unexpected error registering API scheme")) } - params = tests.NewParams("crud") - + params = tests.NewParamsWithGivenClusterName("crud", "test-cluster") } func TestCRUD(t *testing.T) { @@ -59,14 +60,27 @@ func TestCRUD(t *testing.T) { } const ( - mngNG1 = "mng-1" - mngNG2 = "mng-2" - - unmNG1 = "unm-1" - unmNG2 = "unm-2" + taintsNg1 = "ng-taints-1" + taintsNg2 = "ng-taints-2" + scaleSingleNg = "ng-scale-single" + scaleMultipleNg = "ng-scale-multiple" + + scaleMultipleMng = "mng-scale-multiple" + GPUMng = "mng-gpu" + drainMng = "mng-drain" + newSubnetCLIMng = "mng-new-subnet-CLI" + newSubnetConfigFileMng = "mng-new-subnet-config-file" ) -var _ = BeforeSuite(func() { +func makeClusterConfig() *api.ClusterConfig { + clusterConfig := api.NewClusterConfig() + clusterConfig.Metadata.Name = params.ClusterName + clusterConfig.Metadata.Region = params.Region + clusterConfig.Metadata.Version = params.Version + return clusterConfig +} + +var _ = SynchronizedBeforeSuite(func() { params.KubeconfigTemp = false if params.KubeconfigPath == "" { wd, _ := os.Getwd() @@ -90,1265 +104,912 @@ var _ = BeforeSuite(func() { } return } - fmt.Fprintf(GinkgoWriter, "Using kubeconfig: %s\n", params.KubeconfigPath) - cmd := params.EksctlCreateCmd.WithArgs( - "cluster", - "--verbose", "4", - "--name", params.ClusterName, - 
"--tags", "alpha.eksctl.io/description=eksctl integration test", - "--nodegroup-name", mngNG1, - "--node-labels", "ng-name="+mngNG1, - "--nodes", "1", - "--version", params.Version, - "--kubeconfig", params.KubeconfigPath, - "--zones", "us-west-2b,us-west-2c", - ) - Expect(cmd).To(RunSuccessfully()) -}) - -var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { - commonTimeout := 10 * time.Minute - nodegroupTimeout := time.Hour.String() - makeClusterConfig := func() *api.ClusterConfig { - clusterConfig := api.NewClusterConfig() - clusterConfig.Metadata.Name = params.ClusterName - clusterConfig.Metadata.Region = params.Region - clusterConfig.Metadata.Version = params.Version - return clusterConfig + cfg := makeClusterConfig() + cfg.NodeGroups = []*api.NodeGroup{ + { + NodeGroupBase: &api.NodeGroupBase{ + Name: scaleSingleNg, + }, + }, + { + NodeGroupBase: &api.NodeGroupBase{ + Name: scaleMultipleNg, + }, + }, } - - makeClientset := func() *kubernetes.Clientset { - config, err := clientcmd.BuildConfigFromFlags("", params.KubeconfigPath) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - clientset, err := kubernetes.NewForConfig(config) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - return clientset + cfg.ManagedNodeGroups = []*api.ManagedNodeGroup{ + { + NodeGroupBase: &api.NodeGroupBase{ + Name: drainMng, + }, + }, + { + NodeGroupBase: &api.NodeGroupBase{ + Name: scaleMultipleMng, + }, + }, + } + cfg.AvailabilityZones = []string{"us-west-2b", "us-west-2c"} + cfg.Metadata.Tags = map[string]string{ + "alpha.eksctl.io/description": "eksctl integration test", } - Describe("cluster with 1 node", func() { - It("should have created an EKS cluster and two CloudFormation stacks", func() { - config := NewConfig(params.Region) + Expect(params.EksctlCreateCmd. + WithArgs( + "cluster", + "--config-file", "-", + "--verbose", "4", + "--kubeconfig", params.KubeconfigPath, + ). + WithoutArg("--region", params.Region). 
+ WithStdin(clusterutils.Reader(cfg))).To(RunSuccessfully()) - Expect(config).To(HaveExistingCluster(params.ClusterName, string(ekstypes.ClusterStatusActive), params.Version)) + // create an additional subnet to test nodegroup creation within it later on + createAdditionalSubnet(cfg) +}, func() {}) - Expect(config).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-cluster", params.ClusterName))) - Expect(config).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, mngNG1))) - }) +var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { - It("should have created a valid kubectl config file", func() { - config, err := clientcmd.LoadFromFile(params.KubeconfigPath) + Context("cluster with 2 managed and 2 unmanaged nodegroup", func() { + It("should ensure it is running and functional", func() { + By("having created an EKS cluster and 3 CloudFormation stacks") + awsConfig := NewConfig(params.Region) + Expect(awsConfig).To(HaveExistingCluster(params.ClusterName, string(ekstypes.ClusterStatusActive), params.Version)) + Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-cluster", params.ClusterName))) + Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, drainMng))) + Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleMultipleMng))) + Expect(awsConfig).NotTo(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleSingleNg))) + Expect(awsConfig).NotTo(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleMultipleNg))) + + By("having created a valid kubectl config file") + kubeConfig, err := clientcmd.LoadFromFile(params.KubeconfigPath) Expect(err).ShouldNot(HaveOccurred()) - err = clientcmd.ConfirmUsable(*config, "") + err = clientcmd.ConfirmUsable(*kubeConfig, "") Expect(err).ShouldNot(HaveOccurred()) - Expect(config.CurrentContext).To(ContainSubstring("eksctl")) - Expect(config.CurrentContext).To(ContainSubstring(params.ClusterName)) - Expect(config.CurrentContext).To(ContainSubstring(params.Region)) - }) + Expect(kubeConfig.CurrentContext).To(ContainSubstring("eksctl")) + Expect(kubeConfig.CurrentContext).To(ContainSubstring(params.ClusterName)) + Expect(kubeConfig.CurrentContext).To(ContainSubstring(params.Region)) - Context("and listing clusters", func() { - It("should return the previously created cluster", func() { - cmd := params.EksctlGetCmd.WithArgs("clusters", "--all-regions") - AssertContainsCluster(cmd, GetClusterOutput{ + By("successfully fetching the previously created cluster") + AssertContainsCluster( + params.EksctlGetCmd.WithArgs("clusters", "--all-regions"), + GetClusterOutput{ ClusterName: params.ClusterName, Region: params.Region, EksctlCreated: "True", - }) - }) - }) + }, + ) - Context("and describe the stack for the cluster", func() { - It("should describe the cluster's stack", func() { - cmd := params.EksctlUtilsCmd.WithArgs("describe-stacks", "--cluster", params.ClusterName, "-o", "yaml") - session := cmd.Run() - Expect(session.ExitCode()).To(BeZero()) - var stacks []*cfntypes.Stack - Expect(yaml.Unmarshal(session.Out.Contents(), &stacks)).To(Succeed()) - Expect(stacks).To(HaveLen(2)) - nodegroupStack := stacks[0] - clusterStack := stacks[1] - Expect(aws.ToString(clusterStack.StackName)).To(ContainSubstring(params.ClusterName)) - Expect(aws.ToString(nodegroupStack.StackName)).To(ContainSubstring(params.ClusterName)) - Expect(aws.ToString(clusterStack.Description)).To(Equal("EKS 
cluster (dedicated VPC: true, dedicated IAM: true) [created and managed by eksctl]")) - Expect(aws.ToString(nodegroupStack.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) - }) + By("successfully describing cluster's CFN stacks") + session := params.EksctlUtilsCmd.WithArgs("describe-stacks", "--cluster", params.ClusterName, "-o", "yaml").Run() + Expect(session.ExitCode()).To(BeZero()) + var stacks []*cfntypes.Stack + Expect(yaml.Unmarshal(session.Out.Contents(), &stacks)).To(Succeed()) + Expect(stacks).To(HaveLen(3)) + nodegroupStack1 := stacks[0] + nodegroupStack2 := stacks[1] + clusterStack := stacks[2] + Expect(aws.ToString(clusterStack.StackName)).To(ContainSubstring(params.ClusterName)) + Expect(aws.ToString(nodegroupStack1.StackName)).To(ContainSubstring(params.ClusterName)) + Expect(aws.ToString(nodegroupStack2.StackName)).To(ContainSubstring(params.ClusterName)) + Expect(aws.ToString(clusterStack.Description)).To(Equal("EKS cluster (dedicated VPC: true, dedicated IAM: true) [created and managed by eksctl]")) + Expect(aws.ToString(nodegroupStack1.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) + Expect(aws.ToString(nodegroupStack2.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) }) - Context("toggling kubernetes API access", func() { + It("should be able to manipulate iam identity mappings", func() { var ( - clientSet kubernetes.Interface + expR0, expR1, expU0 string + role0, role1 iam.Identity + user0 iam.Identity + admin = "admin" + alice = "alice" ) - BeforeEach(func() { - cfg := &api.ClusterConfig{ - Metadata: &api.ClusterMeta{ - Name: params.ClusterName, - Region: params.Region, - }, - } - ctl, err := eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) - Expect(err).NotTo(HaveOccurred()) - err = ctl.RefreshClusterStatus(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - clientSet, err = ctl.NewStdClientSet(cfg) - Expect(err).ShouldNot(HaveOccurred()) - }) - - It("should be publicly accessible by default", func() { - _, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - }) - - It("should be able to disable public access", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "set-public-access-cidrs", - "--cluster", params.ClusterName, - "1.1.1.1/32,2.2.2.0/24", - "--approve", - ) - Expect(cmd).To(RunSuccessfully()) - _, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) - Expect(err).Should(HaveOccurred()) - }) - - It("should be able to re-enable public access", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "set-public-access-cidrs", - "--cluster", params.ClusterName, - "0.0.0.0/0", - "--approve", - ) - Expect(cmd).To(RunSuccessfully()) - - _, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - }) - }) + roleCanonicalArn := "arn:aws:iam::123456:role/eksctl-testing-XYZ" + var err error + role0 = iam.RoleIdentity{ + RoleARN: roleCanonicalArn, + KubernetesIdentity: iam.KubernetesIdentity{ + KubernetesUsername: admin, + KubernetesGroups: []string{"system:masters", "system:nodes"}, + }, + } + role1 = iam.RoleIdentity{ + RoleARN: roleCanonicalArn, + KubernetesIdentity: iam.KubernetesIdentity{ + KubernetesGroups: []string{"system:something"}, + }, + } + + userCanonicalArn := 
"arn:aws:iam::123456:user/alice" + + user0 = iam.UserIdentity{ + UserARN: userCanonicalArn, + KubernetesIdentity: iam.KubernetesIdentity{ + KubernetesUsername: alice, + KubernetesGroups: []string{"system:masters", "cryptographers"}, + }, + } + + bs, err := yaml.Marshal([]iam.Identity{role0}) + Expect(err).ShouldNot(HaveOccurred()) + expR0 = string(bs) - Context("and create a new nodegroup with taints and maxPods", func() { - It("should have taints and maxPods set", func() { - By("creating a new nodegroup with taints and maxPods set") - cmd := params.EksctlCreateCmd. - WithArgs( - "nodegroup", - "--config-file", "-", - "--verbose", "4", - ). - WithoutArg("--region", params.Region). - WithStdin(clusterutils.ReaderFromFile(params.ClusterName, params.Region, "testdata/taints-max-pods.yaml")) - Expect(cmd).To(RunSuccessfully()) + bs, err = yaml.Marshal([]iam.Identity{role1}) + Expect(err).ShouldNot(HaveOccurred()) + expR1 = string(bs) - clientset := makeClientset() + bs, err = yaml.Marshal([]iam.Identity{user0}) + Expect(err).ShouldNot(HaveOccurred()) + expU0 = string(bs) - By("asserting that both formats for taints are supported") - var ( - nodeListN1 = tests.ListNodes(clientset, unmNG1) - nodeListN2 = tests.ListNodes(clientset, unmNG2) - ) + By("failing to get unknown role mapping") + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", "arn:aws:iam::123456:role/idontexist", + "-o", "yaml", + )).NotTo(RunSuccessfully()) - tests.AssertNodeTaints(nodeListN1, []corev1.Taint{ - { - Key: "key1", - Value: "val1", - Effect: "NoSchedule", - }, - { - Key: "key2", - Effect: "NoExecute", - }, - }) + By("failing to get unknown user mapping") + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", "arn:aws:iam::123456:user/bob", + "-o", "yaml", + )).NotTo(RunSuccessfully()) + + By("creating role mappings") + Expect(params.EksctlCreateCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", role0.ARN(), + "--username", role0.Username(), + "--group", role0.Groups()[0], + "--group", role0.Groups()[1], + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", role0.ARN(), + "-o", "yaml", + )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0))) + + By("creating user mappings") + Expect(params.EksctlCreateCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", user0.ARN(), + "--username", user0.Username(), + "--group", user0.Groups()[0], + "--group", user0.Groups()[1], + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", user0.ARN(), + "-o", "yaml", + )).To(RunSuccessfullyWithOutputString(MatchYAML(expU0))) + + By("creating a duplicate role mapping") + Expect(params.EksctlCreateCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", role0.ARN(), + "--username", role0.Username(), + "--group", role0.Groups()[0], + "--group", role0.Groups()[1], + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", role0.ARN(), + "-o", "yaml", + )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR0))) + + By("creating a duplicate user mapping") + Expect(params.EksctlCreateCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", user0.ARN(), + "--username", user0.Username(), + 
"--group", user0.Groups()[0], + "--group", user0.Groups()[1], + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", user0.ARN(), + "-o", "yaml", + )).To(RunSuccessfullyWithOutputString(MatchYAML(expU0 + expU0))) - tests.AssertNodeTaints(nodeListN2, []corev1.Taint{ - { - Key: "key1", - Value: "value1", - Effect: "NoSchedule", - }, - { - Key: "key2", - Effect: "NoExecute", - }, - }) + By("creating a duplicate role mapping with different identity") + Expect(params.EksctlCreateCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", role1.ARN(), + "--group", role1.Groups()[0], + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", role1.ARN(), + "-o", "yaml", + )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR0 + expR1))) - By("asserting that maxPods is set correctly") - expectedMaxPods := 123 - for _, node := range nodeListN1.Items { - maxPods, _ := node.Status.Allocatable.Pods().AsInt64() - Expect(maxPods).To(Equal(int64(expectedMaxPods))) - } + By("deleting a single role mapping (fifo)") + Expect(params.EksctlDeleteCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", role1.ARN(), + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", role1.ARN(), + "-o", "yaml", + )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR1))) - }) + By("failing when deleting unknown mapping") + Expect(params.EksctlDeleteCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", "arn:aws:iam::123456:role/idontexist", + )).NotTo(RunSuccessfully()) + + By("deleting duplicate role mappings with --all") + Expect(params.EksctlDeleteCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", role1.ARN(), + "--all", + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--name", params.ClusterName, + "--arn", role1.ARN(), + "-o", "yaml", + )).NotTo(RunSuccessfully()) + + By("deleting duplicate user mappings with --all") + Expect(params.EksctlDeleteCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", user0.ARN(), + "--all", + )).To(RunSuccessfully()) + Expect(params.EksctlGetCmd.WithArgs( + "iamidentitymapping", + "--cluster", params.ClusterName, + "--arn", user0.ARN(), + "-o", "yaml", + )).NotTo(RunSuccessfully()) }) - Context("can add a nodegroup into a new subnet", func() { + Context("manipulate K8s API, Cloudwatch logging and IAM service accounts", func() { var ( - subnet *types.Subnet - nodegroupNameCLI string - nodegroupNameConfigFile string - subnetName string + cfg *api.ClusterConfig + ctl *eks.ClusterProvider + clientSet kubernetes.Interface + err error ) BeforeEach(func() { - nodegroupNameCLI = "test-extra-nodegroup-cli" - nodegroupNameConfigFile = "text-extra-nodegroup-config-file" - subnetName = "new-subnet" - }) - AfterEach(func() { - cmd := params.EksctlDeleteCmd.WithArgs( - "nodegroup", - "--verbose", "4", - "--cluster", params.ClusterName, - "--wait", - nodegroupNameCLI, - ) - Expect(cmd).To(RunSuccessfully()) + cfg = makeClusterConfig() - cmd = params.EksctlDeleteCmd.WithArgs( - "nodegroup", - "--verbose", "4", - "--cluster", params.ClusterName, - "--wait", - nodegroupNameConfigFile, - ) - Expect(cmd).To(RunSuccessfully()) - config := NewConfig(params.Region) - ec2 := 
awsec2.NewFromConfig(config) - output, err := ec2.DeleteSubnet(context.Background(), &awsec2.DeleteSubnetInput{ - SubnetId: subnet.SubnetId, - }) - Expect(err).NotTo(HaveOccurred(), output.ResultMetadata) - - }) - It("creates a new nodegroup", func() { - cfg := &api.ClusterConfig{ - Metadata: &api.ClusterMeta{ - Name: params.ClusterName, - Region: params.Region, - }, - } - ctl, err := eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) - Expect(err).NotTo(HaveOccurred()) - cl, err := ctl.GetCluster(context.Background(), params.ClusterName) - Expect(err).NotTo(HaveOccurred()) - config := NewConfig(params.Region) - ec2 := awsec2.NewFromConfig(config) - existingSubnets, err := ec2.DescribeSubnets(context.Background(), &awsec2.DescribeSubnetsInput{ - SubnetIds: cl.ResourcesVpcConfig.SubnetIds, - }) + ctl, err = eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) Expect(err).NotTo(HaveOccurred()) - Expect(len(existingSubnets.Subnets) > 0).To(BeTrue()) - s := existingSubnets.Subnets[0] - cidr := *s.CidrBlock - var ( - i1, i2, i3, i4, ic int - ) - fmt.Sscanf(cidr, "%d.%d.%d.%d/%d", &i1, &i2, &i3, &i4, &ic) - cidr = fmt.Sprintf("%d.%d.%s.%d/%d", i1, i2, "255", i4, ic) + err = ctl.RefreshClusterStatus(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) - var tags []types.Tag + clientSet, err = ctl.NewStdClientSet(cfg) + Expect(err).ShouldNot(HaveOccurred()) + }) - // filter aws: tags - for _, t := range s.Tags { - if !strings.HasPrefix(*t.Key, "aws:") { - tags = append(tags, t) - } + It("should be able to toggle Kubernetes API access", func() { + k8sAPICall := func() error { + _, err = clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) + return err } - // create a new subnet in that given vpc and zone. 
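// The inline logic above derives a CIDR for the extra subnet by parsing an
// existing subnet's block and pinning its third octet to 255, so the new
// range stays inside the VPC CIDR without overlapping the existing subnets.
// A minimal standalone sketch of that derivation (derivedCIDR is a
// hypothetical helper name, not part of this suite):
//
//	func derivedCIDR(existing string) string {
//		var i1, i2, i3, i4, bits int
//		// e.g. "192.168.0.0/19" -> "192.168.255.0/19"
//		fmt.Sscanf(existing, "%d.%d.%d.%d/%d", &i1, &i2, &i3, &i4, &bits)
//		return fmt.Sprintf("%d.%d.255.%d/%d", i1, i2, i4, bits)
//	}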
- output, err := ec2.CreateSubnet(context.Background(), &awsec2.CreateSubnetInput{ - AvailabilityZone: aws.String("us-west-2a"), - CidrBlock: aws.String(cidr), - TagSpecifications: []types.TagSpecification{ - { - ResourceType: types.ResourceTypeSubnet, - Tags: tags, - }, - }, - VpcId: s.VpcId, - }) - Expect(err).NotTo(HaveOccurred()) - moutput, err := ec2.ModifySubnetAttribute(context.Background(), &awsec2.ModifySubnetAttributeInput{ - MapPublicIpOnLaunch: &types.AttributeBooleanValue{ - Value: aws.Bool(true), - }, - SubnetId: output.Subnet.SubnetId, - }) - Expect(err).NotTo(HaveOccurred(), moutput.ResultMetadata) - subnet = output.Subnet - routeTables, err := ec2.DescribeRouteTables(context.Background(), &awsec2.DescribeRouteTablesInput{ - Filters: []types.Filter{ - { - Name: aws.String("association.subnet-id"), - Values: []string{*s.SubnetId}, - }, - }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(len(routeTables.RouteTables) > 0).To(BeTrue(), fmt.Sprintf("route table ended up being empty: %+v", routeTables)) - routput, err := ec2.AssociateRouteTable(context.Background(), &awsec2.AssociateRouteTableInput{ - RouteTableId: routeTables.RouteTables[0].RouteTableId, - SubnetId: subnet.SubnetId, - }) - Expect(err).NotTo(HaveOccurred(), routput) + By("being publicly accessible by default") + _, err = clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) + Expect(k8sAPICall()).ShouldNot(HaveOccurred()) - // create a nodegroup into the new subnet via CLI - cmd := params.EksctlCreateCmd.WithArgs( - "nodegroup", - "--timeout", nodegroupTimeout, + By("disabling public access") + Expect(params.EksctlUtilsCmd.WithArgs( + "set-public-access-cidrs", "--cluster", params.ClusterName, - "--nodes", "1", - "--node-type", "p2.xlarge", - "--subnet-ids", *subnet.SubnetId, - nodegroupNameCLI, - ) - Expect(cmd).To(RunSuccessfully()) - - // create a nodegroup into the new subnet via config file - clusterConfig := makeClusterConfig() - clusterConfig.VPC = &api.ClusterVPC{ - Network: api.Network{ - ID: *s.VpcId, - }, - Subnets: &api.ClusterSubnets{ - Public: api.AZSubnetMapping{ - subnetName: api.AZSubnetSpec{ - ID: *subnet.SubnetId, - }, - }, - }, - } - clusterConfig.NodeGroups = []*api.NodeGroup{ - { - NodeGroupBase: &api.NodeGroupBase{ - Name: nodegroupNameConfigFile, - ScalingConfig: &api.ScalingConfig{ - DesiredCapacity: aws.Int(1), - }, - Subnets: []string{subnetName}, - }, - }, - } - - cmd = params.EksctlCreateCmd. - WithArgs( - "nodegroup", - "--config-file", "-", - "--verbose", "4", - "--timeout", nodegroupTimeout, - ). - WithoutArg("--region", params.Region). 
- WithStdin(clusterutils.Reader(clusterConfig)) - Expect(cmd).To(RunSuccessfully()) - }) - }) + "1.1.1.1/32,2.2.2.0/24", + "--approve", + )).To(RunSuccessfully()) + Expect(k8sAPICall()).Should(HaveOccurred()) - Context("creating a nodegroup with containerd runtime", func() { - var ( - nodegroupName string - ) - BeforeEach(func() { - nodegroupName = "test-containerd" - }) - AfterEach(func() { - cmd := params.EksctlDeleteCmd.WithArgs( - "nodegroup", - "--verbose", "4", + By("re-enabling public access") + Expect(params.EksctlUtilsCmd.WithArgs( + "set-public-access-cidrs", "--cluster", params.ClusterName, - "--wait", - nodegroupName, - ) - Expect(cmd).To(RunSuccessfully()) - }) - It("should create the nodegroups successfully", func() { - clusterConfig := makeClusterConfig() - clusterConfig.Metadata.Name = params.ClusterName - clusterConfig.NodeGroups = []*api.NodeGroup{ - { - NodeGroupBase: &api.NodeGroupBase{ - Name: "test-containerd", - AMIFamily: api.NodeImageFamilyAmazonLinux2, - InstanceType: "p2.xlarge", - AdditionalVolumes: []*api.VolumeMapping{ - { - VolumeName: aws.String("/dev/sdb"), - }, - }, - }, - ContainerRuntime: aws.String(api.ContainerRuntimeContainerD), - }, - } - - cmd := params.EksctlCreateCmd. - WithArgs( - "nodegroup", - "--config-file", "-", - "--verbose", "4", - "--timeout", nodegroupTimeout, - ). - WithoutArg("--region", params.Region). - WithStdin(clusterutils.Reader(clusterConfig)) - Expect(cmd).To(RunSuccessfully()) - tests.AssertNodeVolumes(params.KubeconfigPath, params.Region, "test-containerd", "/dev/sdb") + "0.0.0.0/0", + "--approve", + )).To(RunSuccessfully()) + Expect(k8sAPICall()).ShouldNot(HaveOccurred()) }) - }) - When("scaling nodegroup(s)", func() { - It("should scale a single nodegroup", func() { - By("passing the name of the nodegroup as a flag") - cmd := params.EksctlScaleNodeGroupCmd.WithArgs( - "--cluster", params.ClusterName, - "--nodes-min", "3", - "--nodes", "3", - "--nodes-max", "3", - "--name", mngNG1, + It("should be able to configure CloudWatch logging", func() { + var ( + enabled, disabled sets.String ) - Expect(cmd).To(RunSuccessfully()) - Eventually(func() runner.Cmd { - getMngNgCmd := params.EksctlGetCmd.WithArgs( - "nodegroup", - "--cluster", params.ClusterName, - "--name", mngNG1, - "-o", "yaml", - ) - return getMngNgCmd - }, "5m", "30s").Should(RunSuccessfullyWithOutputStringLines( - ContainElement(ContainSubstring("Type: managed")), - ContainElement(ContainSubstring("MaxSize: 3")), - ContainElement(ContainSubstring("MinSize: 3")), - ContainElement(ContainSubstring("DesiredCapacity: 3")), - ContainElement(ContainSubstring("Status: ACTIVE")), - )) - }) + By("having all types disabled by default") + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(0)) + Expect(disabled.List()).To(HaveLen(5)) - It("should scale a single nodegroup with wait flag", func() { - cmd := params.EksctlScaleNodeGroupCmd.WithArgs( + By("planning to enable two of the types using flags") + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", "--cluster", params.ClusterName, - "--nodes-min", "4", - "--nodes", "4", - "--nodes-max", "4", - "--name", mngNG1, - "--wait", - ) - Expect(cmd).To(RunSuccessfully()) + "--enable-types", "api,controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + 
Expect(enabled.List()).To(HaveLen(0)) + Expect(disabled.List()).To(HaveLen(5)) - getMngNgCmd := params.EksctlGetCmd.WithArgs( - "nodegroup", + By("enabling two of the types using flags") + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", "--cluster", params.ClusterName, - "--name", mngNG1, - "-o", "yaml", - ) - Expect(getMngNgCmd).To(RunSuccessfullyWithOutputStringLines( - ContainElement(ContainSubstring("Type: managed")), - ContainElement(ContainSubstring("MaxSize: 4")), - ContainElement(ContainSubstring("MinSize: 4")), - ContainElement(ContainSubstring("DesiredCapacity: 4")), - ContainElement(ContainSubstring("Status: ACTIVE")), - )) - }) - - It("should scale all nodegroups", func() { - By("scaling all nodegroups in the config file to the desired capacity, max size, and min size") - cmd := params.EksctlScaleNodeGroupCmd.WithArgs( - "--config-file", "-", - ). - WithoutArg("--region", params.Region). - WithStdin(clusterutils.ReaderFromFile(params.ClusterName, params.Region, "testdata/scale-nodegroups.yaml")) - Expect(cmd).To(RunSuccessfully()) - - Eventually(func() runner.Cmd { - getMngNgCmd := params.EksctlGetCmd.WithArgs( - "nodegroup", - "--cluster", params.ClusterName, - "--name", mngNG1, - "-o", "yaml", - ) - return getMngNgCmd - }, "5m", "30s").Should(RunSuccessfullyWithOutputStringLines( - ContainElement(ContainSubstring("Type: managed")), - ContainElement(ContainSubstring("MaxSize: 5")), - ContainElement(ContainSubstring("MinSize: 5")), - ContainElement(ContainSubstring("DesiredCapacity: 5")), - ContainElement(ContainSubstring("Status: ACTIVE")), - )) - - Eventually(func() runner.Cmd { - getUnmNgCmd := params.EksctlGetCmd.WithArgs( - "nodegroup", - "--cluster", params.ClusterName, - "--name", unmNG1, - "-o", "yaml", - ) - return getUnmNgCmd - }, "5m", "30s").Should(RunSuccessfullyWithOutputStringLines( - ContainElement(ContainSubstring("Type: unmanaged")), - ContainElement(ContainSubstring("MaxSize: 5")), - ContainElement(ContainSubstring("MinSize: 5")), - ContainElement(ContainSubstring("DesiredCapacity: 5")), - ContainElement(ContainSubstring("Status: CREATE_COMPLETE")), - )) - }) - }) - - Context("and add a second (GPU) nodegroup", func() { - It("should not return an error", func() { - cmd := params.EksctlCreateCmd.WithArgs( - "nodegroup", - "--timeout=45m", + "--approve", + "--enable-types", "api,controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(2)) + Expect(disabled.List()).To(HaveLen(3)) + Expect(enabled.List()).To(ConsistOf("api", "controllerManager")) + Expect(disabled.List()).To(ConsistOf("audit", "authenticator", "scheduler")) + + By("enabling all of the types using --enable-types=all") + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", "--cluster", params.ClusterName, - "--nodes", "1", - "--instance-types", "p2.xlarge,p3.2xlarge,p3.8xlarge,g3s.xlarge,g4ad.xlarge,g4ad.2xlarge", - "--node-private-networking", - "--node-zones", "us-west-2b,us-west-2c", - mngNG2, - ) - Expect(cmd).To(RunSuccessfully()) - }) + "--approve", + "--enable-types", "all", + )).To(RunSuccessfully()) + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(5)) + Expect(disabled.List()).To(HaveLen(0)) - It("should be able to list nodegroups", func() { - cmd := params.EksctlGetCmd.WithArgs( - 
"nodegroup", - "-o", "json", + By("enabling all but one type") + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", "--cluster", params.ClusterName, - mngNG1, - ) - Expect(cmd).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich( - HaveLen(1), - ContainElement(mngNG1), - Not(ContainElement(mngNG2)), - ))) - Expect(cmd).To(RunSuccessfullyWithOutputString(ContainSubstring(params.Version))) - - cmd = params.EksctlGetCmd.WithArgs( - "nodegroup", - "-o", "json", + "--approve", + "--enable-types", "all", + "--disable-types", "controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(4)) + Expect(disabled.List()).To(HaveLen(1)) + Expect(enabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) + Expect(disabled.List()).To(ConsistOf("controllerManager")) + + By("disabling all but one type") + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", "--cluster", params.ClusterName, - mngNG2, - ) - Expect(cmd).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich( - HaveLen(1), - ContainElement(mngNG2), - Not(ContainElement(mngNG1)), - ))) - Expect(cmd).To(RunSuccessfullyWithOutputString(ContainSubstring(params.Version))) - - cmd = params.EksctlGetCmd.WithArgs( - "nodegroup", - "-o", "json", + "--approve", + "--disable-types", "all", + "--enable-types", "controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(disabled.List()).To(HaveLen(4)) + Expect(enabled.List()).To(HaveLen(1)) + Expect(disabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) + Expect(enabled.List()).To(ConsistOf("controllerManager")) + + By("disabling all of the types using --disable-types=all") + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", "--cluster", params.ClusterName, - ) - Expect(cmd).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich( - HaveLen(4), - ContainElement(mngNG1), - ContainElement(mngNG2), - ContainElement(unmNG1), - ContainElement(unmNG2), - ))) - Expect(cmd).To(RunSuccessfullyWithOutputString(ContainSubstring(params.Version))) + "--approve", + "--disable-types", "all", + )).To(RunSuccessfully()) + enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(0)) + Expect(disabled.List()).To(HaveLen(5)) + Expect(disabled.HasAll(api.SupportedCloudWatchClusterLogTypes()...)).To(BeTrue()) }) - Context("toggle CloudWatch logging", func() { - var ( - cfg *api.ClusterConfig - ctl *eks.ClusterProvider - ) - - BeforeEach(func() { - cfg = &api.ClusterConfig{ - Metadata: &api.ClusterMeta{ - Name: params.ClusterName, - Region: params.Region, - }, - } - var err error - ctl, err = eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) - Expect(err).NotTo(HaveOccurred()) - }) + It("should be able to create, update, delete iamserviceaccounts", func() { - It("should have all types disabled by default", func() { - enabled, disable, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(0)) - Expect(disable.List()).To(HaveLen(5)) - }) - - It("should plan to enable two of the types using flags", func() { - cmd := 
params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--enable-types", "api,controllerManager", - ) - Expect(cmd).To(RunSuccessfully()) - - enabled, disable, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(0)) - Expect(disable.List()).To(HaveLen(5)) - }) + stackNamePrefix := fmt.Sprintf("eksctl-%s-addon-iamserviceaccount-", params.ClusterName) + awsConfig := NewConfig(params.Region) + oidc, err := ctl.NewOpenIDConnectManager(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) - It("should enable two of the types using flags", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--enable-types", "api,controllerManager", - ) - Expect(cmd).To(RunSuccessfully()) - - enabled, disable, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(2)) - Expect(disable.List()).To(HaveLen(3)) - Expect(enabled.List()).To(ConsistOf("api", "controllerManager")) - Expect(disable.List()).To(ConsistOf("audit", "authenticator", "scheduler")) - }) + By("having OIDC disabled by default") + exists, err := oidc.CheckProviderExists(context.Background()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(exists).To(BeFalse()) - It("should enable all of the types with --enable-types=all", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--enable-types", "all", - ) - Expect(cmd).To(RunSuccessfully()) - - enabled, disable, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(5)) - Expect(disable.List()).To(HaveLen(0)) - }) + By("enabling OIDC") + Expect(params.EksctlUtilsCmd.WithArgs( + "associate-iam-oidc-provider", + "--cluster", params.ClusterName, + "--approve", + )).To(RunSuccessfully()) - It("should enable all but one type", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", + By("creating two iamserviceaccounts") + Expect([]Cmd{ + params.EksctlCreateCmd.WithArgs( + "iamserviceaccount", "--cluster", params.ClusterName, + "--name", "app-cache-access", + "--namespace", "app1", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", "--approve", - "--enable-types", "all", - "--disable-types", "controllerManager", - ) - Expect(cmd).To(RunSuccessfully()) - - enabled, disable, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(4)) - Expect(disable.List()).To(HaveLen(1)) - Expect(enabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) - Expect(disable.List()).To(ConsistOf("controllerManager")) - }) - - It("should disable all but one type", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", + ), + params.EksctlCreateCmd.WithArgs( + "iamserviceaccount", "--cluster", params.ClusterName, + "--name", "s3-read-only", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", "--approve", - "--disable-types", "all", - "--enable-types", "controllerManager", - ) - Expect(cmd).To(RunSuccessfully()) - - enabled, disable, err := 
ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(disable.List()).To(HaveLen(4)) - Expect(enabled.List()).To(HaveLen(1)) - Expect(disable.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) - Expect(enabled.List()).To(ConsistOf("controllerManager")) - }) + ), + }).To(RunSuccessfully()) + Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + "default-s3-read-only")) + Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) - It("should disable all of the types with --disable-types=all", func() { - cmd := params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--disable-types", "all", - ) - Expect(cmd).To(RunSuccessfully()) - - enabled, disable, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(0)) - Expect(disable.List()).To(HaveLen(5)) - Expect(disable.HasAll(api.SupportedCloudWatchClusterLogTypes()...)).To(BeTrue()) - }) - }) + sa, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).Get(context.TODO(), "s3-read-only", metav1.GetOptions{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(sa.Annotations).To(HaveLen(1)) + Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) + Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + params.ClusterName + ".*$")) - Context("create, update, and delete iamserviceaccounts", func() { - var ( - cfg *api.ClusterConfig - ctl *eks.ClusterProvider - oidc *iamoidc.OpenIDConnectManager - err error - ) + sa, err = clientSet.CoreV1().ServiceAccounts("app1").Get(context.TODO(), "app-cache-access", metav1.GetOptions{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(sa.Annotations).To(HaveLen(1)) + Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) + Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + params.ClusterName + ".*$")) - BeforeEach(func() { - cfg = &api.ClusterConfig{ - Metadata: &api.ClusterMeta{ - Name: params.ClusterName, - Region: params.Region, - }, - } - ctl, err = eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) - Expect(err).NotTo(HaveOccurred()) - ctx := context.Background() - err = ctl.RefreshClusterStatus(ctx, cfg) - Expect(err).ShouldNot(HaveOccurred()) - oidc, err = ctl.NewOpenIDConnectManager(ctx, cfg) - Expect(err).ShouldNot(HaveOccurred()) - }) + By("updating service account policy") + Expect(params.EksctlUpdateCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "app-cache-access", + "--namespace", "app1", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", + "--approve", + )).To(RunSuccessfully()) - It("should enable OIDC, create two iamserviceaccounts and update the policies", func() { - { - exists, err := oidc.CheckProviderExists(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(exists).To(BeFalse()) - } + By("listing both iamserviceaccounts") + Expect(params.EksctlGetCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + )).To(RunSuccessfullyWithOutputString(MatchRegexp( + `(?m:^NAMESPACE\s+NAME\s+ROLE\sARN$)` + + `|(?m:^app1\s+app-cache-access\s+arn:aws:iam::.*$)` + + `|(?m:^default\s+s3-read-only\s+arn:aws:iam::.*$)`, + ))) - setupCmd := params.EksctlUtilsCmd.WithArgs( - "associate-iam-oidc-provider", + By("deleting both 
iamserviceaccounts") + Expect([]Cmd{ + params.EksctlDeleteCmd.WithArgs( + "iamserviceaccount", "--cluster", params.ClusterName, - "--approve", - ) - Expect(setupCmd).To(RunSuccessfully()) - - { - exists, err := oidc.CheckProviderExists(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(exists).To(BeTrue()) - } - - cmds := []Cmd{ - params.EksctlCreateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "app-cache-access", - "--namespace", "app1", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", - "--approve", - ), - params.EksctlCreateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-read-only", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", - "--approve", - ), - } - - Expect(cmds).To(RunSuccessfully()) - - awsSession := NewConfig(params.Region) - - stackNamePrefix := fmt.Sprintf("eksctl-%s-addon-iamserviceaccount-", params.ClusterName) - - Expect(awsSession).To(HaveExistingStack(stackNamePrefix + "default-s3-read-only")) - Expect(awsSession).To(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) - - clientSet, err := ctl.NewStdClientSet(cfg) - Expect(err).ShouldNot(HaveOccurred()) - - { - sa, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).Get(context.TODO(), "s3-read-only", metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - - Expect(sa.Annotations).To(HaveLen(1)) - Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) - Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + truncate(params.ClusterName) + ".*$")) - } + "--name", "s3-read-only", + "--wait", + ), + params.EksctlDeleteCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "app-cache-access", + "--namespace", "app1", + "--wait", + ), + }).To(RunSuccessfully()) + Expect(awsConfig).NotTo(HaveExistingStack(stackNamePrefix + "default-s3-read-only")) + Expect(awsConfig).NotTo(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) + }) + }) + }) - { - sa, err := clientSet.CoreV1().ServiceAccounts("app1").Get(context.TODO(), "app-cache-access", metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) + Context("create test workloads", func() { + var ( + err error + test *harness.Test + commonTimeout = 10 * time.Minute + ) - Expect(sa.Annotations).To(HaveLen(1)) - Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) - Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + truncate(params.ClusterName) + ".*$")) - } + BeforeEach(func() { + test, err = kube.NewTest(params.KubeconfigPath) + Expect(err).ShouldNot(HaveOccurred()) + }) - cmds = []Cmd{ - params.EksctlUpdateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "app-cache-access", - "--namespace", "app1", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", - "--approve", - ), - } + AfterEach(func() { + test.Close() + Eventually(func() int { + return len(test.ListPods(test.Namespace, metav1.ListOptions{}).Items) + }, "3m", "1s").Should(BeZero()) + }) - Expect(cmds).To(RunSuccessfully()) - }) + It("should deploy podinfo service to the cluster and access it via proxy", func() { + d := test.CreateDeploymentFromFile(test.Namespace, "../../data/podinfo.yaml") + test.WaitForDeploymentReady(d, commonTimeout) - It("should list both 
iamserviceaccounts", func() { - cmd := params.EksctlGetCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - ) + pods := test.ListPodsFromDeployment(d) + Expect(len(pods.Items)).To(Equal(2)) - Expect(cmd).To(RunSuccessfullyWithOutputString(MatchRegexp( - `(?m:^NAMESPACE\s+NAME\s+ROLE\sARN$)` + - `|(?m:^app1\s+app-cache-access\s+arn:aws:iam::.*$)` + - `|(?m:^default\s+s3-read-only\s+arn:aws:iam::.*$)`, - ))) - }) + // For each pod of the Deployment, check we receive a sensible response to a + // GET request on /version. + for _, pod := range pods.Items { + Expect(pod.Namespace).To(Equal(test.Namespace)) - It("delete both iamserviceaccounts", func() { - cmds := []Cmd{ - params.EksctlDeleteCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-read-only", - "--wait", - ), - params.EksctlDeleteCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "app-cache-access", - "--namespace", "app1", - "--wait", - ), - } - Expect(cmds).To(RunSuccessfully()) + req := test.PodProxyGet(&pod, "", "/version") + fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL()) - awsSession := NewConfig(params.Region) + var js interface{} + test.PodProxyGetJSON(&pod, "", "/version", &js) - stackNamePrefix := fmt.Sprintf("eksctl-%s-addon-iamserviceaccount-", params.ClusterName) + Expect(js.(map[string]interface{})).To(HaveKeyWithValue("version", "1.5.1")) + } + }) - Expect(awsSession).NotTo(HaveExistingStack(stackNamePrefix + "default-s3-read-only")) - Expect(awsSession).NotTo(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) - }) - }) + It("should have functional DNS", func() { + d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-dns.yaml") - Context("create test workloads", func() { - var ( - err error - test *harness.Test - ) + test.WaitForDaemonSetReady(d, commonTimeout) - BeforeEach(func() { - test, err = kube.NewTest(params.KubeconfigPath) - Expect(err).ShouldNot(HaveOccurred()) - }) + { + ds, err := test.GetDaemonSet(test.Namespace, d.Name) + Expect(err).ShouldNot(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) + } + }) - AfterEach(func() { - test.Close() - Eventually(func() int { - return len(test.ListPods(test.Namespace, metav1.ListOptions{}).Items) - }, "3m", "1s").Should(BeZero()) - }) + It("should have access to HTTP(S) sites", func() { + d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-http.yaml") - It("should deploy podinfo service to the cluster and access it via proxy", func() { - d := test.CreateDeploymentFromFile(test.Namespace, "../../data/podinfo.yaml") - test.WaitForDeploymentReady(d, commonTimeout) + test.WaitForDaemonSetReady(d, commonTimeout) - pods := test.ListPodsFromDeployment(d) - Expect(len(pods.Items)).To(Equal(2)) + { + ds, err := test.GetDaemonSet(test.Namespace, d.Name) + Expect(err).ShouldNot(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) + } + }) - // For each pod of the Deployment, check we receive a sensible response to a - // GET request on /version. 
- for _, pod := range pods.Items { - Expect(pod.Namespace).To(Equal(test.Namespace)) + It("should be able to run pods with an iamserviceaccount", func() { + createCmd := params.EksctlCreateCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "s3-reader", + "--namespace", test.Namespace, + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", + "--approve", + ) - req := test.PodProxyGet(&pod, "", "/version") - fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL()) + Expect(createCmd).To(RunSuccessfully()) - var js interface{} - test.PodProxyGetJSON(&pod, "", "/version", &js) + d := test.CreateDeploymentFromFile(test.Namespace, "../../data/iamserviceaccount-checker.yaml") + test.WaitForDeploymentReady(d, commonTimeout) - Expect(js.(map[string]interface{})).To(HaveKeyWithValue("version", "1.5.1")) - } - }) + pods := test.ListPodsFromDeployment(d) + Expect(len(pods.Items)).To(Equal(2)) - It("should have functional DNS", func() { - d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-dns.yaml") + // For each pod of the Deployment, check we get expected environment variables + // via a GET request on /env. + type sessionObject struct { + AssumedRoleUser struct { + AssumedRoleID, Arn string + } + Audience, Provider, SubjectFromWebIdentityToken string + Credentials struct { + SecretAccessKey, SessionToken, Expiration, AccessKeyID string + } + } - test.WaitForDaemonSetReady(d, commonTimeout) + for _, pod := range pods.Items { + Expect(pod.Namespace).To(Equal(test.Namespace)) - { - ds, err := test.GetDaemonSet(test.Namespace, d.Name) - Expect(err).ShouldNot(HaveOccurred()) - fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) - } - }) + so := sessionObject{} - It("should have access to HTTP(S) sites", func() { - d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-http.yaml") + var js []string + test.PodProxyGetJSON(&pod, "", "/env", &js) - test.WaitForDaemonSetReady(d, commonTimeout) + Expect(js).To(ContainElement(HavePrefix("AWS_ROLE_ARN=arn:aws:iam::"))) + Expect(js).To(ContainElement("AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token")) + Expect(js).To(ContainElement(HavePrefix("AWS_SESSION_OBJECT="))) - { - ds, err := test.GetDaemonSet(test.Namespace, d.Name) + for _, envVar := range js { + if strings.HasPrefix(envVar, "AWS_SESSION_OBJECT=") { + err := json.Unmarshal([]byte(strings.TrimPrefix(envVar, "AWS_SESSION_OBJECT=")), &so) Expect(err).ShouldNot(HaveOccurred()) - fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) } - }) - - It("should be able to run pods with an iamserviceaccount", func() { - createCmd := params.EksctlCreateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-reader", - "--namespace", test.Namespace, - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", - "--approve", - ) - - Expect(createCmd).To(RunSuccessfully()) - - d := test.CreateDeploymentFromFile(test.Namespace, "../../data/iamserviceaccount-checker.yaml") - test.WaitForDeploymentReady(d, commonTimeout) - - pods := test.ListPodsFromDeployment(d) - Expect(len(pods.Items)).To(Equal(2)) - - // For each pod of the Deployment, check we get expected environment variables - // via a GET request on /env. 
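// The checker deployment publishes its environment as a JSON-encoded []string
// on /env; the assertions that follow scan for the AWS_SESSION_OBJECT entry
// and decode it into sessionObject. A minimal sketch of that extraction
// (sessionFromEnv is a hypothetical helper name, not part of this suite):
//
//	func sessionFromEnv(env []string) (so sessionObject, err error) {
//		for _, e := range env {
//			if v, ok := strings.CutPrefix(e, "AWS_SESSION_OBJECT="); ok {
//				err = json.Unmarshal([]byte(v), &so)
//			}
//		}
//		return so, err
//	}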
- type sessionObject struct { - AssumedRoleUser struct { - AssumedRoleID, Arn string - } - Audience, Provider, SubjectFromWebIdentityToken string - Credentials struct { - SecretAccessKey, SessionToken, Expiration, AccessKeyID string - } - } - - for _, pod := range pods.Items { - Expect(pod.Namespace).To(Equal(test.Namespace)) - - so := sessionObject{} + } - var js []string - test.PodProxyGetJSON(&pod, "", "/env", &js) + Expect(so.AssumedRoleUser.AssumedRoleID).To(HaveSuffix(":integration-test")) - Expect(js).To(ContainElement(HavePrefix("AWS_ROLE_ARN=arn:aws:iam::"))) - Expect(js).To(ContainElement("AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token")) - Expect(js).To(ContainElement(HavePrefix("AWS_SESSION_OBJECT="))) + Expect(so.AssumedRoleUser.Arn).To(MatchRegexp("^arn:aws:sts::.*:assumed-role/eksctl-" + params.ClusterName + "-.*/integration-test$")) - for _, envVar := range js { - if strings.HasPrefix(envVar, "AWS_SESSION_OBJECT=") { - err := json.Unmarshal([]byte(strings.TrimPrefix(envVar, "AWS_SESSION_OBJECT=")), &so) - Expect(err).ShouldNot(HaveOccurred()) - } - } + Expect(so.Audience).To(Equal("sts.amazonaws.com")) - Expect(so.AssumedRoleUser.AssumedRoleID).To(HaveSuffix(":integration-test")) + Expect(so.Provider).To(MatchRegexp("^arn:aws:iam::.*:oidc-provider/oidc.eks." + params.Region + ".amazonaws.com/id/.*$")) - Expect(so.AssumedRoleUser.Arn).To(MatchRegexp("^arn:aws:sts::.*:assumed-role/eksctl-" + truncate(params.ClusterName) + "-.*/integration-test$")) + Expect(so.SubjectFromWebIdentityToken).To(Equal("system:serviceaccount:" + test.Namespace + ":s3-reader")) - Expect(so.Audience).To(Equal("sts.amazonaws.com")) + Expect(so.Credentials.SecretAccessKey).NotTo(BeEmpty()) + Expect(so.Credentials.SessionToken).NotTo(BeEmpty()) + Expect(so.Credentials.Expiration).NotTo(BeEmpty()) + Expect(so.Credentials.AccessKeyID).NotTo(BeEmpty()) + } - Expect(so.Provider).To(MatchRegexp("^arn:aws:iam::.*:oidc-provider/oidc.eks." + params.Region + ".amazonaws.com/id/.*$")) + Expect(params.EksctlDeleteCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "s3-reader", + "--namespace", test.Namespace, + )).To(RunSuccessfully()) + }) + }) - Expect(so.SubjectFromWebIdentityToken).To(Equal("system:serviceaccount:" + test.Namespace + ":s3-reader")) + Context("creating nodegroups", func() { + It("should be able to create two nodegroups with taints and maxPods", func() { + By("creating them") + Expect(params.EksctlCreateCmd. + WithArgs( + "nodegroup", + "--config-file", "-", + "--verbose", "4", + ). + WithoutArg("--region", params.Region). 
+ WithStdin(clusterutils.ReaderFromFile(params.ClusterName, params.Region, "testdata/taints-max-pods.yaml"))).To(RunSuccessfully()) + + By("asserting that both formats for taints are supported") + config, err := clientcmd.BuildConfigFromFlags("", params.KubeconfigPath) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + clientset, err := kubernetes.NewForConfig(config) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + nodeListN1 := tests.ListNodes(clientset, taintsNg1) + nodeListN2 := tests.ListNodes(clientset, taintsNg2) + + tests.AssertNodeTaints(nodeListN1, []corev1.Taint{ + { + Key: "key1", + Value: "val1", + Effect: "NoSchedule", + }, + { + Key: "key2", + Effect: "NoExecute", + }, + }) + tests.AssertNodeTaints(nodeListN2, []corev1.Taint{ + { + Key: "key1", + Value: "value1", + Effect: "NoSchedule", + }, + { + Key: "key2", + Effect: "NoExecute", + }, + }) - Expect(so.Credentials.SecretAccessKey).NotTo(BeEmpty()) - Expect(so.Credentials.SessionToken).NotTo(BeEmpty()) - Expect(so.Credentials.Expiration).NotTo(BeEmpty()) - Expect(so.Credentials.AccessKeyID).NotTo(BeEmpty()) - } + By("asserting that maxPods is set correctly") + expectedMaxPods := 123 + for _, node := range nodeListN1.Items { + maxPods, _ := node.Status.Allocatable.Pods().AsInt64() + Expect(maxPods).To(Equal(int64(expectedMaxPods))) + } + }) - deleteCmd := params.EksctlDeleteCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-reader", - "--namespace", test.Namespace, - ) + It("should be able to create a new GPU nodegroup", func() { + Expect(params.EksctlCreateCmd.WithArgs( + "nodegroup", + "--timeout=45m", + "--cluster", params.ClusterName, + "--nodes", "1", + "--instance-types", "p2.xlarge,p3.2xlarge,p3.8xlarge,g3s.xlarge,g4ad.xlarge,g4ad.2xlarge", + "--node-private-networking", + "--node-zones", "us-west-2b,us-west-2c", + GPUMng, + )).To(RunSuccessfully()) + }) - Expect(deleteCmd).To(RunSuccessfully()) + Context("creating nodegroups within a new subnet", func() { + var ( + subnet ec2types.Subnet + subnetName string + ) + BeforeEach(func() { + ec2 := awsec2.NewFromConfig(NewConfig(params.Region)) + output, err := ec2.DescribeSubnets(context.Background(), &awsec2.DescribeSubnetsInput{ + Filters: []ec2types.Filter{ + { + Name: aws.String("availability-zone"), + Values: []string{"us-west-2a"}, + }, + }, }) + Expect(err).NotTo(HaveOccurred()) + Expect(len(output.Subnets)).To(Equal(1)) + subnet = output.Subnets[0] + subnetName = "new-subnet" }) - Context("and manipulating iam identity mappings", func() { - var ( - expR0, expR1, expU0 string - role0, role1 iam.Identity - user0 iam.Identity - admin = "admin" - alice = "alice" - ) - - BeforeEach(func() { - roleCanonicalArn := "arn:aws:iam::123456:role/eksctl-testing-XYZ" - var err error - role0 = iam.RoleIdentity{ - RoleARN: roleCanonicalArn, - KubernetesIdentity: iam.KubernetesIdentity{ - KubernetesUsername: admin, - KubernetesGroups: []string{"system:masters", "system:nodes"}, - }, - } - role1 = iam.RoleIdentity{ - RoleARN: roleCanonicalArn, - KubernetesIdentity: iam.KubernetesIdentity{ - KubernetesGroups: []string{"system:something"}, + It("should be able to create a nodegroup in a new subnet via config file", func() { + clusterConfig := makeClusterConfig() + clusterConfig.VPC = &api.ClusterVPC{ + Network: api.Network{ + ID: *subnet.VpcId, + }, + Subnets: &api.ClusterSubnets{ + Public: api.AZSubnetMapping{ + subnetName: api.AZSubnetSpec{ + ID: *subnet.SubnetId, + }, }, - } - - userCanonicalArn := "arn:aws:iam::123456:user/alice" - - user0 = 
iam.UserIdentity{ - UserARN: userCanonicalArn, - KubernetesIdentity: iam.KubernetesIdentity{ - KubernetesUsername: alice, - KubernetesGroups: []string{"system:masters", "cryptographers"}, + }, + } + clusterConfig.NodeGroups = []*api.NodeGroup{ + { + NodeGroupBase: &api.NodeGroupBase{ + Name: newSubnetConfigFileMng, + ScalingConfig: &api.ScalingConfig{ + DesiredCapacity: aws.Int(1), + }, + Subnets: []string{subnetName}, }, - } - - bs, err := yaml.Marshal([]iam.Identity{role0}) - Expect(err).ShouldNot(HaveOccurred()) - expR0 = string(bs) - - bs, err = yaml.Marshal([]iam.Identity{role1}) - Expect(err).ShouldNot(HaveOccurred()) - expR1 = string(bs) - - bs, err = yaml.Marshal([]iam.Identity{user0}) - Expect(err).ShouldNot(HaveOccurred()) - expU0 = string(bs) - }) - - It("fails getting unknown role mapping", func() { - cmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", "arn:aws:iam::123456:role/idontexist", - "-o", "yaml", - ) - Expect(cmd).NotTo(RunSuccessfully()) - }) - It("fails getting unknown user mapping", func() { - cmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", "arn:aws:iam::123456:user/bob", - "-o", "yaml", - ) - Expect(cmd).NotTo(RunSuccessfully()) - }) - It("creates role mapping", func() { - create := params.EksctlCreateCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", role0.ARN(), - "--username", role0.Username(), - "--group", role0.Groups()[0], - "--group", role0.Groups()[1], - ) - Expect(create).To(RunSuccessfully()) - - get := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", role0.ARN(), - "-o", "yaml", - ) - Expect(get).To(RunSuccessfullyWithOutputString(MatchYAML(expR0))) - }) - It("creates user mapping", func() { - create := params.EksctlCreateCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", user0.ARN(), - "--username", user0.Username(), - "--group", user0.Groups()[0], - "--group", user0.Groups()[1], - ) - Expect(create).To(RunSuccessfully()) - - get := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", user0.ARN(), - "-o", "yaml", - ) - Expect(get).To(RunSuccessfullyWithOutputString(MatchYAML(expU0))) - }) - It("creates a duplicate role mapping", func() { - createRole := params.EksctlCreateCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", role0.ARN(), - "--username", role0.Username(), - "--group", role0.Groups()[0], - "--group", role0.Groups()[1], - ) - Expect(createRole).To(RunSuccessfully()) - - get := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", role0.ARN(), - "-o", "yaml", - ) - Expect(get).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR0))) - }) - It("creates a duplicate user mapping", func() { - createCmd := params.EksctlCreateCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", user0.ARN(), - "--username", user0.Username(), - "--group", user0.Groups()[0], - "--group", user0.Groups()[1], - ) - Expect(createCmd).To(RunSuccessfully()) - - getCmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", user0.ARN(), - "-o", "yaml", - ) - Expect(getCmd).To(RunSuccessfullyWithOutputString(MatchYAML(expU0 + expU0))) - }) - It("creates a duplicate role mapping with different identity", func() { - createCmd := params.EksctlCreateCmd.WithArgs( 
- "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", role1.ARN(), - "--group", role1.Groups()[0], - ) - Expect(createCmd).To(RunSuccessfully()) - - getCmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", role1.ARN(), - "-o", "yaml", - ) - Expect(getCmd).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR0 + expR1))) - }) - It("deletes a single role mapping fifo", func() { - deleteCmd := params.EksctlDeleteCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", role1.ARN(), - ) - Expect(deleteCmd).To(RunSuccessfully()) - - getCmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", role1.ARN(), - "-o", "yaml", - ) - Expect(getCmd).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR1))) - }) - It("fails when deleting unknown mapping", func() { - deleteCmd := params.EksctlDeleteCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", "arn:aws:iam::123456:role/idontexist", - ) - Expect(deleteCmd).NotTo(RunSuccessfully()) - }) - It("deletes duplicate role mappings with --all", func() { - deleteCmd := params.EksctlDeleteCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", role1.ARN(), - "--all", - ) - Expect(deleteCmd).To(RunSuccessfully()) - - getCmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--name", params.ClusterName, - "--arn", role1.ARN(), - "-o", "yaml", - ) - Expect(getCmd).NotTo(RunSuccessfully()) - }) - It("deletes duplicate user mappings with --all", func() { - deleteCmd := params.EksctlDeleteCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", user0.ARN(), - "--all", - ) - Expect(deleteCmd).To(RunSuccessfully()) - - getCmd := params.EksctlGetCmd.WithArgs( - "iamidentitymapping", - "--cluster", params.ClusterName, - "--arn", user0.ARN(), - "-o", "yaml", - ) - Expect(getCmd).NotTo(RunSuccessfully()) - }) - }) + }, + } - Context("and delete the second nodegroup", func() { - It("should not return an error", func() { - cmd := params.EksctlDeleteCmd.WithArgs( - "nodegroup", - "--verbose", "4", - "--cluster", params.ClusterName, - mngNG2, - ) - Expect(cmd).To(RunSuccessfully()) - }) + Expect(params.EksctlCreateCmd.WithArgs( + "nodegroup", + "--config-file", "-", + "--verbose", "4", + "--timeout", time.Hour.String(), + ). + WithoutArg("--region", params.Region). 
+				WithStdin(clusterutils.Reader(clusterConfig))).To(RunSuccessfully())
 		})

-	Context("and scale the initial nodegroup back to 1 node", func() {
-		It("should not return an error", func() {
-			cmd := params.EksctlScaleNodeGroupCmd.WithArgs(
+		It("should be able to create a nodegroup in a new subnet via CLI", func() {
+			Expect(params.EksctlCreateCmd.WithArgs(
+				"nodegroup",
+				"--timeout", time.Hour.String(),
 				"--cluster", params.ClusterName,
-				"--nodes-min", "1",
 				"--nodes", "1",
-				"--nodes-max", "1",
-				"--name", mngNG1,
-			)
-			Expect(cmd).To(RunSuccessfully())
-
-			Eventually(func() runner.Cmd {
-				getMngNgCmd := params.EksctlGetCmd.WithArgs(
-					"nodegroup",
-					"--cluster", params.ClusterName,
-					"--name", mngNG1,
-					"-o", "yaml",
-				)
-				return getMngNgCmd
-			}, "5m", "30s").Should(RunSuccessfullyWithOutputStringLines(
-				ContainElement(ContainSubstring("Type: managed")),
-				ContainElement(ContainSubstring("MaxSize: 1")),
-				ContainElement(ContainSubstring("MinSize: 1")),
-				ContainElement(ContainSubstring("DesiredCapacity: 1")),
-				ContainElement(ContainSubstring("Status: ACTIVE")),
-			))
+				"--node-type", "p2.xlarge",
+				"--subnet-ids", *subnet.SubnetId,
+				newSubnetCLIMng,
+			)).To(RunSuccessfully())
 		})
 	})
+	})

-	Context("and drain the initial nodegroup", func() {
-		It("should not return an error", func() {
-			cmd := params.EksctlDrainNodeGroupCmd.WithArgs(
-				"--cluster", params.ClusterName,
-				"--name", mngNG1,
-			)
-			Expect(cmd).To(RunSuccessfully())
-		})
+	Context("scaling nodegroup(s)", func() {
+
+		scaleNgCmd := func(desiredCapacity string) runner.Cmd {
+			return params.EksctlScaleNodeGroupCmd.WithArgs(
+				"--cluster", params.ClusterName,
+				"--nodes-min", desiredCapacity,
+				"--nodes", desiredCapacity,
+				"--nodes-max", desiredCapacity,
+				"--name", scaleSingleNg,
+			)
+		}
+
+		getNgCmd := func(ngName string) runner.Cmd {
+			return params.EksctlGetCmd.WithArgs(
+				"nodegroup",
+				"--cluster", params.ClusterName,
+				"--name", ngName,
+				"-o", "yaml",
+			)
+		}
+
+		It("should be able to scale a single nodegroup", func() {
+			By("upscaling a nodegroup without --wait flag")
+			Expect(scaleNgCmd("3")).To(RunSuccessfully())
+			Eventually(getNgCmd(scaleSingleNg), "5m", "30s").Should(RunSuccessfullyWithOutputStringLines(
+				ContainElement(ContainSubstring("Type: unmanaged")),
+				ContainElement(ContainSubstring("MaxSize: 3")),
+				ContainElement(ContainSubstring("MinSize: 3")),
+				ContainElement(ContainSubstring("DesiredCapacity: 3")),
+				ContainElement(ContainSubstring("Status: CREATE_COMPLETE")),
+			))
+
+			By("upscaling a nodegroup with --wait flag")
+			Expect(scaleNgCmd("4").WithArgs("--wait")).To(RunSuccessfully())
+			Eventually(getNgCmd(scaleSingleNg), "5m", "30s").Should(RunSuccessfullyWithOutputStringLines(
+				ContainElement(ContainSubstring("Type: unmanaged")),
+				ContainElement(ContainSubstring("MaxSize: 4")),
+				ContainElement(ContainSubstring("MinSize: 4")),
+				ContainElement(ContainSubstring("DesiredCapacity: 4")),
+				ContainElement(ContainSubstring("Status: CREATE_COMPLETE")),
+			))
+
+			By("downscaling a nodegroup")
+			Expect(scaleNgCmd("1")).To(RunSuccessfully())
+			Eventually(getNgCmd(scaleSingleNg), "5m", "30s").Should(RunSuccessfullyWithOutputStringLines(
+				ContainElement(ContainSubstring("Type: unmanaged")),
+				ContainElement(ContainSubstring("MaxSize: 1")),
+				ContainElement(ContainSubstring("MinSize: 1")),
+				ContainElement(ContainSubstring("DesiredCapacity: 1")),
+				ContainElement(ContainSubstring("Status: CREATE_COMPLETE")),
+			))
 		})

-	Context("and deleting the cluster", func() {
-		It("should not return an error", func() {
-			if params.SkipDelete {
-				Skip("will not delete cluster " + params.ClusterName)
-			}
+		It("should be able to scale multiple nodegroups", func() {
+			By("passing a config file")
+			Expect(params.EksctlScaleNodeGroupCmd.WithArgs(
+				"--config-file", "-",
+			).
+				WithoutArg("--region", params.Region).
+				WithStdin(clusterutils.ReaderFromFile(params.ClusterName, params.Region, "testdata/scale-nodegroups.yaml")),
+			).To(RunSuccessfully())
+
+			Eventually(getNgCmd(scaleMultipleNg), "5m", "30s").Should(RunSuccessfullyWithOutputStringLines(
+				ContainElement(ContainSubstring("Type: unmanaged")),
+				ContainElement(ContainSubstring("MaxSize: 5")),
+				ContainElement(ContainSubstring("MinSize: 5")),
+				ContainElement(ContainSubstring("DesiredCapacity: 5")),
+				ContainElement(ContainSubstring("Status: CREATE_COMPLETE")),
+			))
+
+			Eventually(getNgCmd(scaleMultipleMng), "5m", "30s").Should(RunSuccessfullyWithOutputStringLines(
+				ContainElement(ContainSubstring("Type: managed")),
+				ContainElement(ContainSubstring("MaxSize: 5")),
+				ContainElement(ContainSubstring("MinSize: 5")),
+				ContainElement(ContainSubstring("DesiredCapacity: 5")),
+				ContainElement(ContainSubstring("Status: ACTIVE")),
+			))
+		})
+	})

-			cmd := params.EksctlDeleteClusterCmd.WithArgs(
-				"--name", params.ClusterName,
-			)
-			Expect(cmd).To(RunSuccessfully())
-		})
+	Context("draining nodegroup(s)", func() {
+		It("should be able to drain a nodegroup", func() {
+			Expect(params.EksctlDrainNodeGroupCmd.WithArgs(
+				"--cluster", params.ClusterName,
+				"--name", drainMng,
+			)).To(RunSuccessfully())
+		})
 	})
 })

-var _ = AfterSuite(func() {
+var _ = SynchronizedAfterSuite(func() {}, func() {
+	Expect(params.EksctlGetCmd.WithArgs(
+		"nodegroup",
+		"-o", "json",
+		"--cluster", params.ClusterName,
+	)).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich(
+		HaveLen(9),
+		ContainElement(taintsNg1),
+		ContainElement(taintsNg2),
+		ContainElement(scaleSingleNg),
+		ContainElement(scaleMultipleNg),
+		ContainElement(scaleMultipleMng),
+		ContainElement(GPUMng),
+		ContainElement(drainMng),
+		ContainElement(newSubnetCLIMng),
+		ContainElement(newSubnetConfigFileMng),
+	)))
+
 	params.DeleteClusters()
 	gexec.KillAndWait()
 	if params.KubeconfigTemp {
@@ -1357,10 +1018,73 @@ var _ = AfterSuite(func() {
 	os.RemoveAll(params.TestDirectory)
 })

-func truncate(clusterName string) string {
-	// CloudFormation seems to truncate long cluster names at 37 characters:
-	if len(clusterName) > 37 {
-		return clusterName[:37]
+func createAdditionalSubnet(cfg *api.ClusterConfig) {
+	ctl, err := eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg)
+	Expect(err).NotTo(HaveOccurred())
+	cl, err := ctl.GetCluster(context.Background(), params.ClusterName)
+	Expect(err).NotTo(HaveOccurred())
+
+	ec2 := awsec2.NewFromConfig(NewConfig(params.Region))
+	existingSubnets, err := ec2.DescribeSubnets(context.Background(), &awsec2.DescribeSubnetsInput{
+		SubnetIds: cl.ResourcesVpcConfig.SubnetIds,
+	})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(len(existingSubnets.Subnets) > 0).To(BeTrue())
+	existingSubnet := existingSubnets.Subnets[0]
+
+	cidr := *existingSubnet.CidrBlock
+	var (
+		i1, i2, i3, i4, ic int
+	)
+	fmt.Sscanf(cidr, "%d.%d.%d.%d/%d", &i1, &i2, &i3, &i4, &ic)
+	cidr = fmt.Sprintf("%d.%d.%s.%d/%d", i1, i2, "255", i4, ic)
+
+	var tags []ec2types.Tag
+
+	// filter aws: tags
+	for _, t := range existingSubnet.Tags {
+		if !strings.HasPrefix(*t.Key, "aws:") {
+			tags = append(tags, t)
+		}
 	}
-	return clusterName
+
+	// create a new subnet in the given VPC and zone.
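Before the CreateSubnet call below, the CIDR arithmetic above deserves a gloss: the Sscanf/Sprintf pair derives the new block by pinning the donor subnet's third octet to 255, yielding a sibling range inside the same VPC. A minimal standalone sketch of that derivation (the helper name siblingCIDR is illustrative only, and it assumes the x.x.255.0 range is still free within the VPC CIDR):

```go
package main

import "fmt"

// siblingCIDR mirrors the Sscanf/Sprintf pair in createAdditionalSubnet:
// it keeps the donor's first, second, and fourth octets plus the prefix
// length, and forces the third octet to 255.
func siblingCIDR(donor string) string {
	var o1, o2, o3, o4, bits int
	fmt.Sscanf(donor, "%d.%d.%d.%d/%d", &o1, &o2, &o3, &o4, &bits)
	return fmt.Sprintf("%d.%d.255.%d/%d", o1, o2, o4, bits)
}

func main() {
	fmt.Println(siblingCIDR("192.168.0.0/19")) // prints 192.168.255.0/19
}
```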
+ output, err := ec2.CreateSubnet(context.Background(), &awsec2.CreateSubnetInput{ + AvailabilityZone: aws.String("us-west-2a"), + CidrBlock: aws.String(cidr), + TagSpecifications: []types.TagSpecification{ + { + ResourceType: types.ResourceTypeSubnet, + Tags: tags, + }, + }, + VpcId: existingSubnet.VpcId, + }) + Expect(err).NotTo(HaveOccurred()) + + moutput, err := ec2.ModifySubnetAttribute(context.Background(), &awsec2.ModifySubnetAttributeInput{ + MapPublicIpOnLaunch: &types.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + SubnetId: output.Subnet.SubnetId, + }) + Expect(err).NotTo(HaveOccurred(), moutput.ResultMetadata) + + subnet := output.Subnet + routeTables, err := ec2.DescribeRouteTables(context.Background(), &awsec2.DescribeRouteTablesInput{ + Filters: []types.Filter{ + { + Name: aws.String("association.subnet-id"), + Values: []string{*existingSubnet.SubnetId}, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(len(routeTables.RouteTables) > 0).To(BeTrue(), fmt.Sprintf("route table ended up being empty: %+v", routeTables)) + + routput, err := ec2.AssociateRouteTable(context.Background(), &awsec2.AssociateRouteTableInput{ + RouteTableId: routeTables.RouteTables[0].RouteTableId, + SubnetId: subnet.SubnetId, + }) + Expect(err).NotTo(HaveOccurred(), routput) } diff --git a/integration/tests/crud/testdata/scale-nodegroups.yaml b/integration/tests/crud/testdata/scale-nodegroups.yaml index 82465b6ea3..466418648c 100644 --- a/integration/tests/crud/testdata/scale-nodegroups.yaml +++ b/integration/tests/crud/testdata/scale-nodegroups.yaml @@ -6,13 +6,13 @@ metadata: name: nodeGroups: - - name: unm-1 + - name: ng-scale-multiple desiredCapacity: 5 maxSize: 5 minSize: 5 managedNodeGroups: - - name: mng-1 + - name: mng-scale-multiple desiredCapacity: 5 maxSize: 5 minSize: 5 diff --git a/integration/tests/crud/testdata/taints-max-pods.yaml b/integration/tests/crud/testdata/taints-max-pods.yaml index 631c7c9ddf..7881cd83ac 100644 --- a/integration/tests/crud/testdata/taints-max-pods.yaml +++ b/integration/tests/crud/testdata/taints-max-pods.yaml @@ -6,12 +6,12 @@ metadata: name: nodeGroups: -- name: unm-1 +- name: ng-taints-1 taints: key1: val1:NoSchedule key2: :NoExecute maxPodsPerNode: 123 -- name: unm-2 +- name: ng-taints-2 volumeSize: 35 taints: - key: key1 From 81465f08af5a56570cc92bd45062e33fdd28e992 Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Wed, 25 Oct 2023 16:51:39 +0300 Subject: [PATCH 02/10] refactor to use ginkgo decorators --- .mockery.yaml | 9 +- docs/release_notes/0.164.0.md | 13 + examples/38-cluster-subnets-sgs.yaml | 22 + integration/scripts/ginkgo-parallel-procs.sh | 18 + .../tests/crud/creategetdelete_test.go | 940 +++++++++--------- .../tests/managed/managed_nodegroup_test.go | 105 ++ pkg/actions/addon/tasks.go | 3 +- .../eksctl.io/v1alpha5/assets/schema.json | 20 +- pkg/apis/eksctl.io/v1alpha5/types.go | 3 + pkg/apis/eksctl.io/v1alpha5/validation.go | 4 + .../eksctl.io/v1alpha5/validation_test.go | 35 + pkg/apis/eksctl.io/v1alpha5/vpc.go | 6 + .../v1alpha5/zz_generated.deepcopy.go | 10 + pkg/cfn/builder/cluster.go | 22 +- pkg/ctl/cmdutils/update_cluster_vpc.go | 89 ++ pkg/ctl/utils/mocks/VPCConfigUpdater.go | 43 + pkg/ctl/utils/nodegroup_health.go | 2 +- pkg/ctl/utils/set_public_access_cidrs.go | 47 +- .../utils/update_cluster_endpoint_access.go | 78 +- pkg/ctl/utils/update_cluster_vpc_config.go | 82 ++ .../utils/update_cluster_vpc_config_test.go | 26 + pkg/ctl/utils/utils.go | 5 +- pkg/ctl/utils/vpc_helper.go | 
180 ++++ pkg/ctl/utils/vpc_helper_test.go | 344 +++++++ pkg/eks/mocks/ConfigProvider.go | 2 +- pkg/eks/mocks/KubeNodeGroup.go | 2 +- pkg/eks/mocksv2/ASG.go | 2 +- pkg/eks/mocksv2/CloudFormation.go | 2 +- pkg/eks/mocksv2/CloudTrail.go | 2 +- pkg/eks/mocksv2/CloudWatchLogs.go | 2 +- pkg/eks/mocksv2/CredentialsProvider.go | 2 +- pkg/eks/mocksv2/EC2.go | 2 +- pkg/eks/mocksv2/EKS.go | 2 +- pkg/eks/mocksv2/ELB.go | 2 +- pkg/eks/mocksv2/ELBV2.go | 2 +- pkg/eks/mocksv2/IAM.go | 2 +- pkg/eks/mocksv2/Outposts.go | 2 +- pkg/eks/mocksv2/SSM.go | 2 +- pkg/eks/mocksv2/STS.go | 2 +- pkg/eks/update.go | 21 +- pkg/version/release.go | 2 +- userdocs/mkdocs.yml | 1 + userdocs/src/getting-started.md | 2 + .../usage/cluster-subnets-security-groups.md | 83 ++ userdocs/src/usage/vpc-cluster-access.md | 39 +- 45 files changed, 1690 insertions(+), 594 deletions(-) create mode 100644 docs/release_notes/0.164.0.md create mode 100644 examples/38-cluster-subnets-sgs.yaml create mode 100755 integration/scripts/ginkgo-parallel-procs.sh create mode 100644 pkg/ctl/cmdutils/update_cluster_vpc.go create mode 100644 pkg/ctl/utils/mocks/VPCConfigUpdater.go create mode 100644 pkg/ctl/utils/update_cluster_vpc_config.go create mode 100644 pkg/ctl/utils/update_cluster_vpc_config_test.go create mode 100644 pkg/ctl/utils/vpc_helper.go create mode 100644 pkg/ctl/utils/vpc_helper_test.go create mode 100644 userdocs/src/usage/cluster-subnets-security-groups.md diff --git a/.mockery.yaml b/.mockery.yaml index 8350d6f9de..d9e1b07099 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -15,7 +15,7 @@ packages: config: dir: "{{.InterfaceDir}}/mocks" outpkg: mocks - + github.com/aws/aws-sdk-go-v2/aws: interfaces: CredentialsProvider: @@ -29,3 +29,10 @@ packages: config: dir: "./pkg/eks/mocks" outpkg: mocks + + github.com/weaveworks/eksctl/pkg/ctl/utils: + interfaces: + VPCConfigUpdater: + config: + dir: "{{.InterfaceDir}}/mocks" + outpkg: mocks diff --git a/docs/release_notes/0.164.0.md b/docs/release_notes/0.164.0.md new file mode 100644 index 0000000000..d600aabd7d --- /dev/null +++ b/docs/release_notes/0.164.0.md @@ -0,0 +1,13 @@ +# Release v0.164.0 + +## 🚀 Features + +- Allow mutating control plane subnets and security groups (#7218) + +## 🎯 Improvements + +- Add `eksctl utils update-cluster-vpc-config` to update the cluster VPC config (#7090) + +## 🐛 Bug Fixes + +- Don't close error channel early when wait flag is used (#7217) diff --git a/examples/38-cluster-subnets-sgs.yaml b/examples/38-cluster-subnets-sgs.yaml new file mode 100644 index 0000000000..67e1b8202a --- /dev/null +++ b/examples/38-cluster-subnets-sgs.yaml @@ -0,0 +1,22 @@ +# An example config for updating API server endpoint access, public access CIDRs, and control plane subnets and security groups. 
+# To perform the update, run `eksctl utils update-cluster-vpc-config -f 38-cluster-subnets-sgs.yaml` + +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: cluster-38 + region: us-west-2 + +iam: + withOIDC: true + +vpc: + controlPlaneSubnetIDs: [subnet-1234, subnet-5678] + controlPlaneSecurityGroupIDs: [sg-1234, sg-5678] + clusterEndpoints: + publicAccess: true + privateAccess: true + publicAccessCIDRs: ["1.1.1.1/32"] + +managedNodeGroups: + - name: mng1 diff --git a/integration/scripts/ginkgo-parallel-procs.sh b/integration/scripts/ginkgo-parallel-procs.sh new file mode 100755 index 0000000000..ea861397d8 --- /dev/null +++ b/integration/scripts/ginkgo-parallel-procs.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +PARALLEL_PROCS="-p -procs=" + +case $1 in + "crud") + echo "${PARALLEL_PROCS}5" + ;; + "managed") + echo "${PARALLEL_PROCS}5" + ;; + "windows") + echo "${PARALLEL_PROCS}3" + ;; + *) + echo "" + ;; +esac \ No newline at end of file diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index cddd9d4fc6..24aa2a66cd 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -17,12 +17,11 @@ import ( . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" "github.com/pkg/errors" - "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/yaml" "github.com/aws/aws-sdk-go-v2/aws" cfntypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" @@ -40,11 +39,15 @@ import ( api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/iam" + iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc" "github.com/weaveworks/eksctl/pkg/testutils" "github.com/weaveworks/eksctl/pkg/utils/file" ) -var params *tests.Params +var ( + params *tests.Params + extraSubnetID string +) func init() { // Call testing.Init() prior to tests.NewParams(), as otherwise -test.* will not be recognised. 
See also: https://golang.org/doc/go1.13#testing @@ -52,7 +55,7 @@ func init() { if err := api.Register(); err != nil { panic(errors.Wrap(err, "unexpected error registering API scheme")) } - params = tests.NewParamsWithGivenClusterName("crud", "test-cluster") + params = tests.NewParamsWithGivenClusterName("crud", "test-crud") } func TestCRUD(t *testing.T) { @@ -60,6 +63,7 @@ func TestCRUD(t *testing.T) { } const ( + deleteNg = "ng-delete" taintsNg1 = "ng-taints-1" taintsNg2 = "ng-taints-2" scaleSingleNg = "ng-scale-single" @@ -80,7 +84,7 @@ func makeClusterConfig() *api.ClusterConfig { return clusterConfig } -var _ = SynchronizedBeforeSuite(func() { +var _ = SynchronizedBeforeSuite(func() []byte { params.KubeconfigTemp = false if params.KubeconfigPath == "" { wd, _ := os.Getwd() @@ -102,12 +106,16 @@ var _ = SynchronizedBeforeSuite(func() { ) Expect(cmd).To(RunSuccessfully()) } - return } fmt.Fprintf(GinkgoWriter, "Using kubeconfig: %s\n", params.KubeconfigPath) cfg := makeClusterConfig() cfg.NodeGroups = []*api.NodeGroup{ + { + NodeGroupBase: &api.NodeGroupBase{ + Name: deleteNg, + }, + }, { NodeGroupBase: &api.NodeGroupBase{ Name: scaleSingleNg, @@ -147,23 +155,27 @@ var _ = SynchronizedBeforeSuite(func() { WithStdin(clusterutils.Reader(cfg))).To(RunSuccessfully()) // create an additional subnet to test nodegroup creation within it later on - createAdditionalSubnet(cfg) -}, func() {}) + extraSubnetID = createAdditionalSubnet(cfg) + return []byte(extraSubnetID) +}, func(subnetID []byte) { + extraSubnetID = string(subnetID) +}) var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { - Context("cluster with 2 managed and 2 unmanaged nodegroup", func() { - It("should ensure it is running and functional", func() { - By("having created an EKS cluster and 3 CloudFormation stacks") + Context("validating cluster setup", func() { + It("should have created an EKS cluster and 6 CloudFormation stacks", func() { awsConfig := NewConfig(params.Region) Expect(awsConfig).To(HaveExistingCluster(params.ClusterName, string(ekstypes.ClusterStatusActive), params.Version)) Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-cluster", params.ClusterName))) Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, drainMng))) Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleMultipleMng))) - Expect(awsConfig).NotTo(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleSingleNg))) - Expect(awsConfig).NotTo(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleMultipleNg))) + Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, deleteNg))) + Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleSingleNg))) + Expect(awsConfig).To(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-%s", params.ClusterName, scaleMultipleNg))) + }) - By("having created a valid kubectl config file") + It("should have created a valid kubectl config file", func() { kubeConfig, err := clientcmd.LoadFromFile(params.KubeconfigPath) Expect(err).ShouldNot(HaveOccurred()) @@ -173,8 +185,9 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { Expect(kubeConfig.CurrentContext).To(ContainSubstring("eksctl")) Expect(kubeConfig.CurrentContext).To(ContainSubstring(params.ClusterName)) Expect(kubeConfig.CurrentContext).To(ContainSubstring(params.Region)) + }) - 
By("successfully fetching the previously created cluster") + It("should successfully fetch the cluster", func() { AssertContainsCluster( params.EksctlGetCmd.WithArgs("clusters", "--all-regions"), GetClusterOutput{ @@ -183,35 +196,435 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { EksctlCreated: "True", }, ) + }) - By("successfully describing cluster's CFN stacks") + It("should successfully describe cluster's CFN stacks", func() { session := params.EksctlUtilsCmd.WithArgs("describe-stacks", "--cluster", params.ClusterName, "-o", "yaml").Run() Expect(session.ExitCode()).To(BeZero()) var stacks []*cfntypes.Stack Expect(yaml.Unmarshal(session.Out.Contents(), &stacks)).To(Succeed()) - Expect(stacks).To(HaveLen(3)) - nodegroupStack1 := stacks[0] - nodegroupStack2 := stacks[1] - clusterStack := stacks[2] - Expect(aws.ToString(clusterStack.StackName)).To(ContainSubstring(params.ClusterName)) - Expect(aws.ToString(nodegroupStack1.StackName)).To(ContainSubstring(params.ClusterName)) - Expect(aws.ToString(nodegroupStack2.StackName)).To(ContainSubstring(params.ClusterName)) - Expect(aws.ToString(clusterStack.Description)).To(Equal("EKS cluster (dedicated VPC: true, dedicated IAM: true) [created and managed by eksctl]")) - Expect(aws.ToString(nodegroupStack1.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) - Expect(aws.ToString(nodegroupStack2.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) + Expect(stacks).To(HaveLen(6)) + //nodegroupStack := stacks[0] + //clusterStack := stacks[5] + //Expect(aws.ToString(clusterStack.StackName)).To(ContainSubstring(params.ClusterName)) + //Expect(aws.ToString(nodegroupStack.StackName)).To(ContainSubstring(params.ClusterName)) + //Expect(aws.ToString(clusterStack.Description)).To(Equal("EKS cluster (dedicated VPC: true, dedicated IAM: true) [created and managed by eksctl]")) + //Expect(aws.ToString(nodegroupStack.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) }) + }) - It("should be able to manipulate iam identity mappings", func() { - var ( - expR0, expR1, expU0 string - role0, role1 iam.Identity - user0 iam.Identity - admin = "admin" - alice = "alice" - ) + Context("creating cluster workloads", func() { + var ( + err error + test *harness.Test + commonTimeout = 10 * time.Minute + ) + + BeforeEach(func() { + test, err = kube.NewTest(params.KubeconfigPath) + Expect(err).ShouldNot(HaveOccurred()) + }) + + AfterEach(func() { + test.Close() + Eventually(func() int { + return len(test.ListPods(test.Namespace, metav1.ListOptions{}).Items) + }, "3m", "1s").Should(BeZero()) + }) + + It("should deploy podinfo service to the cluster and access it via proxy", func() { + d := test.CreateDeploymentFromFile(test.Namespace, "../../data/podinfo.yaml") + test.WaitForDeploymentReady(d, commonTimeout) + + pods := test.ListPodsFromDeployment(d) + Expect(len(pods.Items)).To(Equal(2)) + + // For each pod of the Deployment, check we receive a sensible response to a + // GET request on /version. 
+ for _, pod := range pods.Items { + Expect(pod.Namespace).To(Equal(test.Namespace)) + + req := test.PodProxyGet(&pod, "", "/version") + fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL()) + + var js interface{} + test.PodProxyGetJSON(&pod, "", "/version", &js) + + Expect(js.(map[string]interface{})).To(HaveKeyWithValue("version", "1.5.1")) + } + }) + + It("should have functional DNS", func() { + d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-dns.yaml") + test.WaitForDaemonSetReady(d, commonTimeout) + ds, err := test.GetDaemonSet(test.Namespace, d.Name) + Expect(err).ShouldNot(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) + }) + + It("should have access to HTTP(S) sites", func() { + d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-http.yaml") + test.WaitForDaemonSetReady(d, commonTimeout) + ds, err := test.GetDaemonSet(test.Namespace, d.Name) + Expect(err).ShouldNot(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) + }) + }) + + Context("configuring IAM service accounts", Ordered, func() { + var ( + clientSet kubernetes.Interface + test *harness.Test + awsConfig aws.Config + oidc *iamoidc.OpenIDConnectManager + stackNamePrefix string + ) + + BeforeAll(func() { + cfg := makeClusterConfig() + + ctl, err := eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) + Expect(err).NotTo(HaveOccurred()) + + err = ctl.RefreshClusterStatus(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + + clientSet, err = ctl.NewStdClientSet(cfg) + Expect(err).ShouldNot(HaveOccurred()) + + test, err = kube.NewTest(params.KubeconfigPath) + Expect(err).ShouldNot(HaveOccurred()) + + stackNamePrefix = fmt.Sprintf("eksctl-%s-addon-iamserviceaccount-", params.ClusterName) + awsConfig = NewConfig(params.Region) + + oidc, err = ctl.NewOpenIDConnectManager(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + }) + + AfterAll(func() { + test.Close() + Eventually(func() int { + return len(test.ListPods(test.Namespace, metav1.ListOptions{}).Items) + }, "3m", "1s").Should(BeZero()) + }) + + It("should have OIDC disabled by default", func() { + exists, err := oidc.CheckProviderExists(context.Background()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(exists).To(BeFalse()) + }) + + It("should successfully enable OIDC", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "associate-iam-oidc-provider", + "--cluster", params.ClusterName, + "--approve", + )).To(RunSuccessfully()) + }) + + It("should successfully create two iamserviceaccounts", func() { + Expect([]Cmd{ + params.EksctlCreateCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "app-cache-access", + "--namespace", "app1", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", + "--approve", + ), + params.EksctlCreateCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "s3-reader", + "--namespace", test.Namespace, + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", + "--approve", + ), + }).To(RunSuccessfully()) + Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + test.Namespace + "-s3-reader")) + Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) + + sa, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).Get(context.TODO(), "s3-reader", metav1.GetOptions{}) + 
Expect(err).ShouldNot(HaveOccurred()) + Expect(sa.Annotations).To(HaveLen(1)) + Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) + Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + params.ClusterName + ".*$")) + + sa, err = clientSet.CoreV1().ServiceAccounts("app1").Get(context.TODO(), "app-cache-access", metav1.GetOptions{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(sa.Annotations).To(HaveLen(1)) + Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) + Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + params.ClusterName + ".*$")) + }) + + It("should successfully update service account policy", func() { + Expect(params.EksctlUpdateCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "app-cache-access", + "--namespace", "app1", + "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", + "--approve", + )).To(RunSuccessfully()) + }) + + It("should successfully list both iamserviceaccounts", func() { + Expect(params.EksctlGetCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + )).To(RunSuccessfullyWithOutputString(MatchRegexp( + `(?m:^NAMESPACE\s+NAME\s+ROLE\sARN$)` + + `|(?m:^app1\s+app-cache-access\s+arn:aws:iam::.*$)` + + fmt.Sprintf(`|(?m:^%s\s+s3-reader\s+arn:aws:iam::.*$)`, test.Namespace), + ))) + }) + + It("should successfully run pods with an iamserviceaccount", func() { + d := test.CreateDeploymentFromFile(test.Namespace, "../../data/iamserviceaccount-checker.yaml") + test.WaitForDeploymentReady(d, 10*time.Minute) + + pods := test.ListPodsFromDeployment(d) + Expect(len(pods.Items)).To(Equal(2)) + + // For each pod of the Deployment, check we get expected environment variables + // via a GET request on /env. + type sessionObject struct { + AssumedRoleUser struct { + AssumedRoleID, Arn string + } + Audience, Provider, SubjectFromWebIdentityToken string + Credentials struct { + SecretAccessKey, SessionToken, Expiration, AccessKeyID string + } + } + + for _, pod := range pods.Items { + Expect(pod.Namespace).To(Equal(test.Namespace)) + + so := sessionObject{} + + var js []string + test.PodProxyGetJSON(&pod, "", "/env", &js) + + Expect(js).To(ContainElement(HavePrefix("AWS_ROLE_ARN=arn:aws:iam::"))) + Expect(js).To(ContainElement("AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token")) + Expect(js).To(ContainElement(HavePrefix("AWS_SESSION_OBJECT="))) + + for _, envVar := range js { + if strings.HasPrefix(envVar, "AWS_SESSION_OBJECT=") { + err := json.Unmarshal([]byte(strings.TrimPrefix(envVar, "AWS_SESSION_OBJECT=")), &so) + Expect(err).ShouldNot(HaveOccurred()) + } + } + + Expect(so.AssumedRoleUser.AssumedRoleID).To(HaveSuffix(":integration-test")) + + Expect(so.AssumedRoleUser.Arn).To(MatchRegexp("^arn:aws:sts::.*:assumed-role/eksctl-" + params.ClusterName + "-.*/integration-test$")) + + Expect(so.Audience).To(Equal("sts.amazonaws.com")) + + Expect(so.Provider).To(MatchRegexp("^arn:aws:iam::.*:oidc-provider/oidc.eks." 
+ params.Region + ".amazonaws.com/id/.*$")) + + Expect(so.SubjectFromWebIdentityToken).To(Equal("system:serviceaccount:" + test.Namespace + ":s3-reader")) + + Expect(so.Credentials.SecretAccessKey).NotTo(BeEmpty()) + Expect(so.Credentials.SessionToken).NotTo(BeEmpty()) + Expect(so.Credentials.Expiration).NotTo(BeEmpty()) + Expect(so.Credentials.AccessKeyID).NotTo(BeEmpty()) + } + }) + + It("should successfully delete both iamserviceaccounts", func() { + Expect([]Cmd{ + params.EksctlDeleteCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "s3-reader", + "--namespace", test.Namespace, + "--wait", + ), + params.EksctlDeleteCmd.WithArgs( + "iamserviceaccount", + "--cluster", params.ClusterName, + "--name", "app-cache-access", + "--namespace", "app1", + "--wait", + ), + }).To(RunSuccessfully()) + Expect(awsConfig).NotTo(HaveExistingStack(stackNamePrefix + test.Namespace + "-s3-reader")) + Expect(awsConfig).NotTo(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) + }) + }) + + Context("configuring K8s API", Serial, Ordered, func() { + var ( + k8sAPICall func() error + ) + + BeforeAll(func() { + cfg := makeClusterConfig() + + ctl, err := eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) + Expect(err).NotTo(HaveOccurred()) + + err = ctl.RefreshClusterStatus(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + + clientSet, err := ctl.NewStdClientSet(cfg) + Expect(err).ShouldNot(HaveOccurred()) + + k8sAPICall = func() error { + _, err = clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) + return err + } + }) + + It("should have public access by default", func() { + Expect(k8sAPICall()).ShouldNot(HaveOccurred()) + }) + + It("should disable public access", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "set-public-access-cidrs", + "--cluster", params.ClusterName, + "1.1.1.1/32,2.2.2.0/24", + "--approve", + )).To(RunSuccessfully()) + Expect(k8sAPICall()).Should(HaveOccurred()) + }) + + It("should re-enable public access", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "set-public-access-cidrs", + "--cluster", params.ClusterName, + "0.0.0.0/0", + "--approve", + )).To(RunSuccessfully()) + Expect(k8sAPICall()).ShouldNot(HaveOccurred()) + }) + }) + + Context("configuring Cloudwatch logging", Serial, Ordered, func() { + var ( + cfg *api.ClusterConfig + ctl *eks.ClusterProvider + err error + ) + + BeforeEach(func() { + cfg = makeClusterConfig() + + ctl, err = eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should have all types disabled by default", func() { + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(0)) + Expect(disabled.List()).To(HaveLen(5)) + }) + + It("should plan to enable two of the types using flags", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", + "--cluster", params.ClusterName, + "--enable-types", "api,controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(0)) + Expect(disabled.List()).To(HaveLen(5)) + }) + + It("should enable two of the types using flags", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", + "--cluster", params.ClusterName, + 
"--approve", + "--enable-types", "api,controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(2)) + Expect(disabled.List()).To(HaveLen(3)) + Expect(enabled.List()).To(ConsistOf("api", "controllerManager")) + Expect(disabled.List()).To(ConsistOf("audit", "authenticator", "scheduler")) + }) + + It("should enable all of the types using --enable-types=all", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", + "--cluster", params.ClusterName, + "--approve", + "--enable-types", "all", + )).To(RunSuccessfully()) + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(5)) + Expect(disabled.List()).To(HaveLen(0)) + }) + + It("should enable all but one type", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", + "--cluster", params.ClusterName, + "--approve", + "--enable-types", "all", + "--disable-types", "controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(4)) + Expect(disabled.List()).To(HaveLen(1)) + Expect(enabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) + Expect(disabled.List()).To(ConsistOf("controllerManager")) + }) + + It("should disable all but one type", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", + "--cluster", params.ClusterName, + "--approve", + "--disable-types", "all", + "--enable-types", "controllerManager", + )).To(RunSuccessfully()) + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(disabled.List()).To(HaveLen(4)) + Expect(enabled.List()).To(HaveLen(1)) + Expect(disabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) + Expect(enabled.List()).To(ConsistOf("controllerManager")) + }) + + It("should disable all of the types using --disable-types=all", func() { + Expect(params.EksctlUtilsCmd.WithArgs( + "update-cluster-logging", + "--cluster", params.ClusterName, + "--approve", + "--disable-types", "all", + )).To(RunSuccessfully()) + enabled, disabled, err := ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) + Expect(err).ShouldNot(HaveOccurred()) + Expect(enabled.List()).To(HaveLen(0)) + Expect(disabled.List()).To(HaveLen(5)) + Expect(disabled.HasAll(api.SupportedCloudWatchClusterLogTypes()...)).To(BeTrue()) + }) + }) + + Context("configuring iam identity mappings", Serial, Ordered, func() { + var ( + expR0, expR1, expU0 string + role0, role1 iam.Identity + user0 iam.Identity + admin = "admin" + alice = "alice" + ) + BeforeAll(func() { roleCanonicalArn := "arn:aws:iam::123456:role/eksctl-testing-XYZ" - var err error role0 = iam.RoleIdentity{ RoleARN: roleCanonicalArn, KubernetesIdentity: iam.KubernetesIdentity{ @@ -247,27 +660,30 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { bs, err = yaml.Marshal([]iam.Identity{user0}) Expect(err).ShouldNot(HaveOccurred()) expU0 = string(bs) + }) - By("failing to get unknown role mapping") + It("should fail to get unknown role mapping", func() { Expect(params.EksctlGetCmd.WithArgs( "iamidentitymapping", "--cluster", params.ClusterName, "--arn", 
"arn:aws:iam::123456:role/idontexist", "-o", "yaml", )).NotTo(RunSuccessfully()) + }) - By("failing to get unknown user mapping") + It("should fail to get unknown user mapping", func() { Expect(params.EksctlGetCmd.WithArgs( "iamidentitymapping", "--cluster", params.ClusterName, "--arn", "arn:aws:iam::123456:user/bob", "-o", "yaml", )).NotTo(RunSuccessfully()) + }) - By("creating role mappings") + It("should create role mappings", func() { Expect(params.EksctlCreateCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", role0.ARN(), "--username", role0.Username(), "--group", role0.Groups()[0], @@ -275,15 +691,16 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { )).To(RunSuccessfully()) Expect(params.EksctlGetCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", role0.ARN(), "-o", "yaml", )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0))) + }) - By("creating user mappings") + It("should create user mappings", func() { Expect(params.EksctlCreateCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", user0.ARN(), "--username", user0.Username(), "--group", user0.Groups()[0], @@ -295,11 +712,12 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "--arn", user0.ARN(), "-o", "yaml", )).To(RunSuccessfullyWithOutputString(MatchYAML(expU0))) + }) - By("creating a duplicate role mapping") + It("should create a duplicate role mapping", func() { Expect(params.EksctlCreateCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", role0.ARN(), "--username", role0.Username(), "--group", role0.Groups()[0], @@ -307,12 +725,13 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { )).To(RunSuccessfully()) Expect(params.EksctlGetCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", role0.ARN(), "-o", "yaml", )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR0))) + }) - By("creating a duplicate user mapping") + It("should create a duplicate user mapping", func() { Expect(params.EksctlCreateCmd.WithArgs( "iamidentitymapping", "--cluster", params.ClusterName, @@ -327,8 +746,9 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "--arn", user0.ARN(), "-o", "yaml", )).To(RunSuccessfullyWithOutputString(MatchYAML(expU0 + expU0))) + }) - By("creating a duplicate role mapping with different identity") + It("should create a duplicate role mapping with different identity", func() { Expect(params.EksctlCreateCmd.WithArgs( "iamidentitymapping", "--cluster", params.ClusterName, @@ -341,8 +761,9 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "--arn", role1.ARN(), "-o", "yaml", )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR0 + expR1))) + }) - By("deleting a single role mapping (fifo)") + It("should delete a single role mapping (fifo)", func() { Expect(params.EksctlDeleteCmd.WithArgs( "iamidentitymapping", "--cluster", params.ClusterName, @@ -354,29 +775,32 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "--arn", role1.ARN(), "-o", "yaml", )).To(RunSuccessfullyWithOutputString(MatchYAML(expR0 + expR1))) + }) - By("failing when deleting unknown mapping") + It("should fail to delete unknown mapping", func() { Expect(params.EksctlDeleteCmd.WithArgs( "iamidentitymapping", "--cluster", 
params.ClusterName, "--arn", "arn:aws:iam::123456:role/idontexist", )).NotTo(RunSuccessfully()) + }) - By("deleting duplicate role mappings with --all") + It("should delete duplicate role mappings with --all", func() { Expect(params.EksctlDeleteCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", role1.ARN(), "--all", )).To(RunSuccessfully()) Expect(params.EksctlGetCmd.WithArgs( "iamidentitymapping", - "--name", params.ClusterName, + "--cluster", params.ClusterName, "--arn", role1.ARN(), "-o", "yaml", )).NotTo(RunSuccessfully()) + }) - By("deleting duplicate user mappings with --all") + It("should delete duplicate user mappings with --all", func() { Expect(params.EksctlDeleteCmd.WithArgs( "iamidentitymapping", "--cluster", params.ClusterName, @@ -390,380 +814,6 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "-o", "yaml", )).NotTo(RunSuccessfully()) }) - - Context("manipulate K8s API, Cloudwatch logging and IAM service accounts", func() { - var ( - cfg *api.ClusterConfig - ctl *eks.ClusterProvider - clientSet kubernetes.Interface - err error - ) - BeforeEach(func() { - cfg = makeClusterConfig() - - ctl, err = eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) - Expect(err).NotTo(HaveOccurred()) - - err = ctl.RefreshClusterStatus(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - - clientSet, err = ctl.NewStdClientSet(cfg) - Expect(err).ShouldNot(HaveOccurred()) - }) - - It("should be able to toggle Kubernetes API access", func() { - k8sAPICall := func() error { - _, err = clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) - return err - } - - By("being publicly accessible by default") - _, err = clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).List(context.TODO(), metav1.ListOptions{}) - Expect(k8sAPICall()).ShouldNot(HaveOccurred()) - - By("disabling public access") - Expect(params.EksctlUtilsCmd.WithArgs( - "set-public-access-cidrs", - "--cluster", params.ClusterName, - "1.1.1.1/32,2.2.2.0/24", - "--approve", - )).To(RunSuccessfully()) - Expect(k8sAPICall()).Should(HaveOccurred()) - - By("re-enabling public access") - Expect(params.EksctlUtilsCmd.WithArgs( - "set-public-access-cidrs", - "--cluster", params.ClusterName, - "0.0.0.0/0", - "--approve", - )).To(RunSuccessfully()) - Expect(k8sAPICall()).ShouldNot(HaveOccurred()) - }) - - It("should be able to configure CloudWatch logging", func() { - var ( - enabled, disabled sets.String - ) - - By("having all types disabled by default") - enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(0)) - Expect(disabled.List()).To(HaveLen(5)) - - By("planning to enable two of the types using flags") - Expect(params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--enable-types", "api,controllerManager", - )).To(RunSuccessfully()) - enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(0)) - Expect(disabled.List()).To(HaveLen(5)) - - By("enabling two of the types using flags") - Expect(params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--enable-types", "api,controllerManager", - )).To(RunSuccessfully()) - enabled, disabled, err = 
ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(2)) - Expect(disabled.List()).To(HaveLen(3)) - Expect(enabled.List()).To(ConsistOf("api", "controllerManager")) - Expect(disabled.List()).To(ConsistOf("audit", "authenticator", "scheduler")) - - By("enabling all of the types using --enable-types=all") - Expect(params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--enable-types", "all", - )).To(RunSuccessfully()) - enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(5)) - Expect(disabled.List()).To(HaveLen(0)) - - By("enabling all but one type") - Expect(params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--enable-types", "all", - "--disable-types", "controllerManager", - )).To(RunSuccessfully()) - enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(4)) - Expect(disabled.List()).To(HaveLen(1)) - Expect(enabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) - Expect(disabled.List()).To(ConsistOf("controllerManager")) - - By("disabling all but one type") - Expect(params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--disable-types", "all", - "--enable-types", "controllerManager", - )).To(RunSuccessfully()) - enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(disabled.List()).To(HaveLen(4)) - Expect(enabled.List()).To(HaveLen(1)) - Expect(disabled.List()).To(ConsistOf("api", "audit", "authenticator", "scheduler")) - Expect(enabled.List()).To(ConsistOf("controllerManager")) - - By("disabling all of the types using --disable-types=all") - Expect(params.EksctlUtilsCmd.WithArgs( - "update-cluster-logging", - "--cluster", params.ClusterName, - "--approve", - "--disable-types", "all", - )).To(RunSuccessfully()) - enabled, disabled, err = ctl.GetCurrentClusterConfigForLogging(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - Expect(enabled.List()).To(HaveLen(0)) - Expect(disabled.List()).To(HaveLen(5)) - Expect(disabled.HasAll(api.SupportedCloudWatchClusterLogTypes()...)).To(BeTrue()) - }) - - It("should be able to create, update, delete iamserviceaccounts", func() { - - stackNamePrefix := fmt.Sprintf("eksctl-%s-addon-iamserviceaccount-", params.ClusterName) - awsConfig := NewConfig(params.Region) - oidc, err := ctl.NewOpenIDConnectManager(context.Background(), cfg) - Expect(err).ShouldNot(HaveOccurred()) - - By("having OIDC disabled by default") - exists, err := oidc.CheckProviderExists(context.Background()) - Expect(err).ShouldNot(HaveOccurred()) - Expect(exists).To(BeFalse()) - - By("enabling OIDC") - Expect(params.EksctlUtilsCmd.WithArgs( - "associate-iam-oidc-provider", - "--cluster", params.ClusterName, - "--approve", - )).To(RunSuccessfully()) - - By("creating two iamserviceaccounts") - Expect([]Cmd{ - params.EksctlCreateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "app-cache-access", - "--namespace", "app1", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", - "--attach-policy-arn", 
"arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", - "--approve", - ), - params.EksctlCreateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-read-only", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", - "--approve", - ), - }).To(RunSuccessfully()) - Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + "default-s3-read-only")) - Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) - - sa, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).Get(context.TODO(), "s3-read-only", metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - Expect(sa.Annotations).To(HaveLen(1)) - Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) - Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + params.ClusterName + ".*$")) - - sa, err = clientSet.CoreV1().ServiceAccounts("app1").Get(context.TODO(), "app-cache-access", metav1.GetOptions{}) - Expect(err).ShouldNot(HaveOccurred()) - Expect(sa.Annotations).To(HaveLen(1)) - Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN)) - Expect(sa.Annotations[api.AnnotationEKSRoleARN]).To(MatchRegexp("^arn:aws:iam::.*:role/eksctl-" + params.ClusterName + ".*$")) - - By("updating service account policy") - Expect(params.EksctlUpdateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "app-cache-access", - "--namespace", "app1", - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", - "--approve", - )).To(RunSuccessfully()) - - By("listing both iamserviceaccounts") - Expect(params.EksctlGetCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - )).To(RunSuccessfullyWithOutputString(MatchRegexp( - `(?m:^NAMESPACE\s+NAME\s+ROLE\sARN$)` + - `|(?m:^app1\s+app-cache-access\s+arn:aws:iam::.*$)` + - `|(?m:^default\s+s3-read-only\s+arn:aws:iam::.*$)`, - ))) - - By("deleting both iamserviceaccounts") - Expect([]Cmd{ - params.EksctlDeleteCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-read-only", - "--wait", - ), - params.EksctlDeleteCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "app-cache-access", - "--namespace", "app1", - "--wait", - ), - }).To(RunSuccessfully()) - Expect(awsConfig).NotTo(HaveExistingStack(stackNamePrefix + "default-s3-read-only")) - Expect(awsConfig).NotTo(HaveExistingStack(stackNamePrefix + "app1-app-cache-access")) - }) - }) - }) - - Context("create test workloads", func() { - var ( - err error - test *harness.Test - commonTimeout = 10 * time.Minute - ) - - BeforeEach(func() { - test, err = kube.NewTest(params.KubeconfigPath) - Expect(err).ShouldNot(HaveOccurred()) - }) - - AfterEach(func() { - test.Close() - Eventually(func() int { - return len(test.ListPods(test.Namespace, metav1.ListOptions{}).Items) - }, "3m", "1s").Should(BeZero()) - }) - - It("should deploy podinfo service to the cluster and access it via proxy", func() { - d := test.CreateDeploymentFromFile(test.Namespace, "../../data/podinfo.yaml") - test.WaitForDeploymentReady(d, commonTimeout) - - pods := test.ListPodsFromDeployment(d) - Expect(len(pods.Items)).To(Equal(2)) - - // For each pod of the Deployment, check we receive a sensible response to a - // GET request on /version. 
- for _, pod := range pods.Items { - Expect(pod.Namespace).To(Equal(test.Namespace)) - - req := test.PodProxyGet(&pod, "", "/version") - fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL()) - - var js interface{} - test.PodProxyGetJSON(&pod, "", "/version", &js) - - Expect(js.(map[string]interface{})).To(HaveKeyWithValue("version", "1.5.1")) - } - }) - - It("should have functional DNS", func() { - d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-dns.yaml") - - test.WaitForDaemonSetReady(d, commonTimeout) - - { - ds, err := test.GetDaemonSet(test.Namespace, d.Name) - Expect(err).ShouldNot(HaveOccurred()) - fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) - } - }) - - It("should have access to HTTP(S) sites", func() { - d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-http.yaml") - - test.WaitForDaemonSetReady(d, commonTimeout) - - { - ds, err := test.GetDaemonSet(test.Namespace, d.Name) - Expect(err).ShouldNot(HaveOccurred()) - fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) - } - }) - - It("should be able to run pods with an iamserviceaccount", func() { - createCmd := params.EksctlCreateCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-reader", - "--namespace", test.Namespace, - "--attach-policy-arn", "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess", - "--approve", - ) - - Expect(createCmd).To(RunSuccessfully()) - - d := test.CreateDeploymentFromFile(test.Namespace, "../../data/iamserviceaccount-checker.yaml") - test.WaitForDeploymentReady(d, commonTimeout) - - pods := test.ListPodsFromDeployment(d) - Expect(len(pods.Items)).To(Equal(2)) - - // For each pod of the Deployment, check we get expected environment variables - // via a GET request on /env. - type sessionObject struct { - AssumedRoleUser struct { - AssumedRoleID, Arn string - } - Audience, Provider, SubjectFromWebIdentityToken string - Credentials struct { - SecretAccessKey, SessionToken, Expiration, AccessKeyID string - } - } - - for _, pod := range pods.Items { - Expect(pod.Namespace).To(Equal(test.Namespace)) - - so := sessionObject{} - - var js []string - test.PodProxyGetJSON(&pod, "", "/env", &js) - - Expect(js).To(ContainElement(HavePrefix("AWS_ROLE_ARN=arn:aws:iam::"))) - Expect(js).To(ContainElement("AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token")) - Expect(js).To(ContainElement(HavePrefix("AWS_SESSION_OBJECT="))) - - for _, envVar := range js { - if strings.HasPrefix(envVar, "AWS_SESSION_OBJECT=") { - err := json.Unmarshal([]byte(strings.TrimPrefix(envVar, "AWS_SESSION_OBJECT=")), &so) - Expect(err).ShouldNot(HaveOccurred()) - } - } - - Expect(so.AssumedRoleUser.AssumedRoleID).To(HaveSuffix(":integration-test")) - - Expect(so.AssumedRoleUser.Arn).To(MatchRegexp("^arn:aws:sts::.*:assumed-role/eksctl-" + params.ClusterName + "-.*/integration-test$")) - - Expect(so.Audience).To(Equal("sts.amazonaws.com")) - - Expect(so.Provider).To(MatchRegexp("^arn:aws:iam::.*:oidc-provider/oidc.eks." 
+ params.Region + ".amazonaws.com/id/.*$")) - - Expect(so.SubjectFromWebIdentityToken).To(Equal("system:serviceaccount:" + test.Namespace + ":s3-reader")) - - Expect(so.Credentials.SecretAccessKey).NotTo(BeEmpty()) - Expect(so.Credentials.SessionToken).NotTo(BeEmpty()) - Expect(so.Credentials.Expiration).NotTo(BeEmpty()) - Expect(so.Credentials.AccessKeyID).NotTo(BeEmpty()) - } - - Expect(params.EksctlDeleteCmd.WithArgs( - "iamserviceaccount", - "--cluster", params.ClusterName, - "--name", "s3-reader", - "--namespace", test.Namespace, - )).To(RunSuccessfully()) - }) }) Context("creating nodegroups", func() { @@ -833,22 +883,16 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { Context("creating nodegroups within a new subnet", func() { var ( - subnet ec2types.Subnet + vpcID string subnetName string ) BeforeEach(func() { ec2 := awsec2.NewFromConfig(NewConfig(params.Region)) output, err := ec2.DescribeSubnets(context.Background(), &awsec2.DescribeSubnetsInput{ - Filters: []ec2types.Filter{ - { - Name: aws.String("availability-zone"), - Values: []string{"us-west-2a"}, - }, - }, + SubnetIds: []string{extraSubnetID}, }) Expect(err).NotTo(HaveOccurred()) - Expect(len(output.Subnets)).To(Equal(1)) - subnet = output.Subnets[0] + vpcID = *output.Subnets[0].VpcId subnetName = "new-subnet" }) @@ -856,12 +900,12 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { clusterConfig := makeClusterConfig() clusterConfig.VPC = &api.ClusterVPC{ Network: api.Network{ - ID: *subnet.VpcId, + ID: vpcID, }, Subnets: &api.ClusterSubnets{ Public: api.AZSubnetMapping{ subnetName: api.AZSubnetSpec{ - ID: *subnet.SubnetId, + ID: extraSubnetID, }, }, }, @@ -895,7 +939,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "--cluster", params.ClusterName, "--nodes", "1", "--node-type", "p2.xlarge", - "--subnet-ids", *subnet.SubnetId, + "--subnet-ids", extraSubnetID, newSubnetCLIMng, )).To(RunSuccessfully()) }) @@ -903,7 +947,6 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { }) Context("scaling nodegroup(s)", func() { - scaleNgCmd := func(desiredCapacity string) runner.Cmd { return params.EksctlScaleNodeGroupCmd.WithArgs( "--cluster", params.ClusterName, @@ -913,7 +956,6 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "--name", scaleSingleNg, ) } - getNgCmd := func(ngName string) runner.Cmd { return params.EksctlGetCmd.WithArgs( "nodegroup", @@ -990,6 +1032,16 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { )).To(RunSuccessfully()) }) }) + + Context("deleting nodegroup(s)", func() { + It("should be able to delete an unmanaged nodegroup", func() { + Expect(params.EksctlDeleteCmd.WithArgs( + "nodegroup", + "--cluster", params.ClusterName, + "--name", deleteNg, + )).To(RunSuccessfully()) + }) + }) }) var _ = SynchronizedAfterSuite(func() {}, func() { @@ -998,7 +1050,7 @@ var _ = SynchronizedAfterSuite(func() {}, func() { "-o", "json", "--cluster", params.ClusterName, )).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich( - HaveLen(9), + // HaveLen(9), ContainElement(taintsNg1), ContainElement(taintsNg2), ContainElement(scaleSingleNg), @@ -1018,7 +1070,7 @@ var _ = SynchronizedAfterSuite(func() {}, func() { os.RemoveAll(params.TestDirectory) }) -func createAdditionalSubnet(cfg *api.ClusterConfig) { +func createAdditionalSubnet(cfg *api.ClusterConfig) string { ctl, err := eks.New(context.TODO(), &api.ProviderConfig{Region: params.Region}, cfg) 
Expect(err).NotTo(HaveOccurred()) cl, err := ctl.GetCluster(context.Background(), params.ClusterName) @@ -1087,4 +1139,6 @@ func createAdditionalSubnet(cfg *api.ClusterConfig) { SubnetId: subnet.SubnetId, }) Expect(err).NotTo(HaveOccurred(), routput) + + return *subnet.SubnetId } diff --git a/integration/tests/managed/managed_nodegroup_test.go b/integration/tests/managed/managed_nodegroup_test.go index 32a4940889..f5fee494c4 100644 --- a/integration/tests/managed/managed_nodegroup_test.go +++ b/integration/tests/managed/managed_nodegroup_test.go @@ -6,10 +6,13 @@ package managed import ( "context" "fmt" + "strings" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" awseks "github.com/aws/aws-sdk-go-v2/service/eks" harness "github.com/dlespiau/kube-test-harness" @@ -27,6 +30,7 @@ import ( clusterutils "github.com/weaveworks/eksctl/integration/utilities/cluster" "github.com/weaveworks/eksctl/integration/utilities/kube" api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + "github.com/weaveworks/eksctl/pkg/awsapi" "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/testutils" ) @@ -521,6 +525,107 @@ var _ = Describe("(Integration) Create Managed Nodegroups", func() { Expect(cmd).To(RunSuccessfully()) }) }) + + Context("eksctl utils update-cluster-vpc-config", Serial, func() { + makeAWSProvider := func(ctx context.Context, clusterConfig *api.ClusterConfig) api.ClusterProvider { + clusterProvider, err := eks.New(ctx, &api.ProviderConfig{Region: params.Region}, clusterConfig) + Expect(err).NotTo(HaveOccurred()) + return clusterProvider.AWSProvider + } + getPrivateSubnetIDs := func(ctx context.Context, ec2API awsapi.EC2, vpcID string) []string { + out, err := ec2API.DescribeSubnets(ctx, &ec2.DescribeSubnetsInput{ + Filters: []ec2types.Filter{ + { + Name: aws.String("vpc-id"), + Values: []string{vpcID}, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + var subnetIDs []string + for _, s := range out.Subnets { + if !*s.MapPublicIpOnLaunch { + subnetIDs = append(subnetIDs, *s.SubnetId) + } + } + return subnetIDs + } + It("should update the VPC config", func() { + clusterConfig := makeClusterConfig() + ctx := context.Background() + awsProvider := makeAWSProvider(ctx, clusterConfig) + cluster, err := awsProvider.EKS().DescribeCluster(ctx, &awseks.DescribeClusterInput{ + Name: aws.String(params.ClusterName), + }) + Expect(err).NotTo(HaveOccurred(), "error describing cluster") + clusterSubnetIDs := getPrivateSubnetIDs(ctx, awsProvider.EC2(), *cluster.Cluster.ResourcesVpcConfig.VpcId) + Expect(len(cluster.Cluster.ResourcesVpcConfig.SecurityGroupIds) > 0).To(BeTrue(), "at least one security group ID must be associated with the cluster") + + clusterVPC := &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PrivateAccess: api.Enabled(), + PublicAccess: api.Enabled(), + }, + PublicAccessCIDRs: []string{"127.0.0.1/32"}, + ControlPlaneSubnetIDs: clusterSubnetIDs, + ControlPlaneSecurityGroupIDs: []string{cluster.Cluster.ResourcesVpcConfig.SecurityGroupIds[0]}, + } + By("accepting CLI options") + cmd := params.EksctlUtilsCmd.WithArgs( + "update-cluster-vpc-config", + "--cluster", params.ClusterName, + "--private-access", + "--public-access", + "--public-access-cidrs", strings.Join(clusterVPC.PublicAccessCIDRs, ","), + "--control-plane-subnet-ids", strings.Join(clusterVPC.ControlPlaneSubnetIDs, ","), + "--control-plane-security-group-ids", 
strings.Join(clusterVPC.ControlPlaneSecurityGroupIDs, ","), + "-v4", + "--approve", + ). + WithTimeout(45 * time.Minute) + session := cmd.Run() + Expect(session.ExitCode()).To(Equal(0)) + + formatWithClusterAndRegion := func(format string, values ...any) string { + return fmt.Sprintf(format, append([]any{params.ClusterName, params.Region}, values...)...) + } + Expect(strings.Split(string(session.Buffer().Contents()), "\n")).To(ContainElements( + ContainSubstring(formatWithClusterAndRegion("control plane subnets and security groups for cluster %q in %q have been updated to: "+ + "controlPlaneSubnetIDs=%v, controlPlaneSecurityGroupIDs=%v", clusterVPC.ControlPlaneSubnetIDs, clusterVPC.ControlPlaneSecurityGroupIDs)), + ContainSubstring(formatWithClusterAndRegion("Kubernetes API endpoint access for cluster %q in %q has been updated to: privateAccess=%v, publicAccess=%v", + *clusterVPC.ClusterEndpoints.PrivateAccess, *clusterVPC.ClusterEndpoints.PublicAccess)), + ContainSubstring(formatWithClusterAndRegion("public access CIDRs for cluster %q in %q have been updated to: %v", clusterVPC.PublicAccessCIDRs)), + )) + + By("accepting a config file") + clusterConfig.VPC = clusterVPC + cmd = params.EksctlUtilsCmd.WithArgs( + "update-cluster-vpc-config", + "--config-file", "-", + "-v4", + "--approve", + ). + WithoutArg("--region", params.Region). + WithStdin(clusterutils.Reader(clusterConfig)) + session = cmd.Run() + Expect(session.ExitCode()).To(Equal(0)) + Expect(strings.Split(string(session.Buffer().Contents()), "\n")).To(ContainElements( + ContainSubstring(formatWithClusterAndRegion("Kubernetes API endpoint access for cluster %q in %q is already up-to-date")), + ContainSubstring(formatWithClusterAndRegion("control plane subnet IDs for cluster %q in %q are already up-to-date")), + ContainSubstring(formatWithClusterAndRegion("control plane security group IDs for cluster %q in %q are already up-to-date")), + )) + + By("resetting public access CIDRs") + cmd = params.EksctlUtilsCmd.WithArgs( + "update-cluster-vpc-config", + "--cluster", params.ClusterName, + "--public-access-cidrs", "0.0.0.0/0", + "-v4", + "--approve", + ) + Expect(cmd).To(RunSuccessfully()) + }) + }) }) var _ = SynchronizedAfterSuite(func() {}, func() { diff --git a/pkg/actions/addon/tasks.go b/pkg/actions/addon/tasks.go index d81837335a..f0f618b48f 100644 --- a/pkg/actions/addon/tasks.go +++ b/pkg/actions/addon/tasks.go @@ -119,8 +119,6 @@ type deleteAddonIAMTask struct { func (t *deleteAddonIAMTask) Describe() string { return t.info } func (t *deleteAddonIAMTask) Do(errorCh chan error) error { - defer close(errorCh) - errMsg := fmt.Sprintf("deleting addon IAM %q", *t.stack.StackName) if t.wait { if err := t.stackManager.DeleteStackBySpecSync(t.ctx, t.stack, errorCh); err != nil { @@ -128,6 +126,7 @@ func (t *deleteAddonIAMTask) Do(errorCh chan error) error { } return nil } + defer close(errorCh) if _, err := t.stackManager.DeleteStackBySpec(t.ctx, t.stack); err != nil { return fmt.Errorf("%s: %w", errMsg, err) } diff --git a/pkg/apis/eksctl.io/v1alpha5/assets/schema.json b/pkg/apis/eksctl.io/v1alpha5/assets/schema.json index 122c0a0fde..c21d94e023 100755 --- a/pkg/apis/eksctl.io/v1alpha5/assets/schema.json +++ b/pkg/apis/eksctl.io/v1alpha5/assets/schema.json @@ -655,6 +655,22 @@ "description": "See [managing access to API](/usage/vpc-networking/#managing-access-to-the-kubernetes-api-server-endpoints)", "x-intellij-html-description": "See managing access to API" }, + "controlPlaneSecurityGroupIDs": { + "items": { + "type": "string" + }, 
+ "type": "array", + "description": "configures the security groups for the control plane.", + "x-intellij-html-description": "configures the security groups for the control plane." + }, + "controlPlaneSubnetIDs": { + "items": { + "type": "string" + }, + "type": "array", + "description": "configures the subnets for the control plane.", + "x-intellij-html-description": "configures the subnets for the control plane." + }, "extraCIDRs": { "items": { "type": "string" @@ -733,7 +749,9 @@ "autoAllocateIPv6", "nat", "clusterEndpoints", - "publicAccessCIDRs" + "publicAccessCIDRs", + "controlPlaneSubnetIDs", + "controlPlaneSecurityGroupIDs" ], "additionalProperties": false, "description": "holds global subnet and all child subnets", diff --git a/pkg/apis/eksctl.io/v1alpha5/types.go b/pkg/apis/eksctl.io/v1alpha5/types.go index 09b2c88491..19ae4bb73f 100644 --- a/pkg/apis/eksctl.io/v1alpha5/types.go +++ b/pkg/apis/eksctl.io/v1alpha5/types.go @@ -957,6 +957,9 @@ type OutpostInfo interface { GetOutpost() *Outpost } +// ErrUnsupportedLocalCluster is an error for when an unsupported operation is attempted on a local cluster. +var ErrUnsupportedLocalCluster = errors.New("this operation is not supported on Outposts clusters") + // Karpenter provides configuration options type Karpenter struct { // Version defines the Karpenter version to install diff --git a/pkg/apis/eksctl.io/v1alpha5/validation.go b/pkg/apis/eksctl.io/v1alpha5/validation.go index fd5d1c5e2f..bd42ffcb32 100644 --- a/pkg/apis/eksctl.io/v1alpha5/validation.go +++ b/pkg/apis/eksctl.io/v1alpha5/validation.go @@ -318,6 +318,10 @@ func (c *ClusterConfig) ValidateVPCConfig() error { c.VPC.ExtraIPv6CIDRs = cidrs } + if c.VPC.SecurityGroup != "" && len(c.VPC.ControlPlaneSecurityGroupIDs) > 0 { + return errors.New("only one of vpc.securityGroup and vpc.controlPlaneSecurityGroupIDs can be specified") + } + if (c.VPC.IPv6Cidr != "" || c.VPC.IPv6Pool != "") && !c.IPv6Enabled() { return fmt.Errorf("Ipv6Cidr and Ipv6CidrPool are only supported when IPFamily is set to IPv6") } diff --git a/pkg/apis/eksctl.io/v1alpha5/validation_test.go b/pkg/apis/eksctl.io/v1alpha5/validation_test.go index ce3abd67d8..d835aeb5a4 100644 --- a/pkg/apis/eksctl.io/v1alpha5/validation_test.go +++ b/pkg/apis/eksctl.io/v1alpha5/validation_test.go @@ -1332,6 +1332,41 @@ var _ = Describe("ClusterConfig validation", func() { }) }) + + type vpcSecurityGroupEntry struct { + updateVPC func(*api.ClusterVPC) + expectedErr string + } + DescribeTable("vpc.securityGroup and vpc.controlPlaneSecurityGroupIDs", func(e vpcSecurityGroupEntry) { + e.updateVPC(cfg.VPC) + err := cfg.ValidateVPCConfig() + if e.expectedErr != "" { + Expect(err).To(MatchError(ContainSubstring(e.expectedErr))) + } else { + Expect(err).NotTo(HaveOccurred()) + } + }, + Entry("both set", vpcSecurityGroupEntry{ + updateVPC: func(v *api.ClusterVPC) { + v.SecurityGroup = "sg-1234" + v.ControlPlaneSecurityGroupIDs = []string{"sg-1234"} + }, + expectedErr: "only one of vpc.securityGroup and vpc.controlPlaneSecurityGroupIDs can be specified", + }), + Entry("vpc.securityGroup set", vpcSecurityGroupEntry{ + updateVPC: func(v *api.ClusterVPC) { + v.SecurityGroup = "sg-1234" + }, + }), + Entry("vpc.controlPlaneSecurityGroupIDs set", vpcSecurityGroupEntry{ + updateVPC: func(v *api.ClusterVPC) { + v.ControlPlaneSecurityGroupIDs = []string{"sg-1234"} + }, + }), + Entry("neither set", vpcSecurityGroupEntry{ + updateVPC: func(v *api.ClusterVPC) {}, + }), + ) }) Describe("ValidatePrivateCluster", func() { diff --git 
a/pkg/apis/eksctl.io/v1alpha5/vpc.go b/pkg/apis/eksctl.io/v1alpha5/vpc.go index 3ed1ccdb8d..14418ea4fa 100644 --- a/pkg/apis/eksctl.io/v1alpha5/vpc.go +++ b/pkg/apis/eksctl.io/v1alpha5/vpc.go @@ -175,6 +175,12 @@ type ( // k8s API endpoint // +optional PublicAccessCIDRs []string `json:"publicAccessCIDRs,omitempty"` + // ControlPlaneSubnetIDs configures the subnets for the control plane. + // +optional + ControlPlaneSubnetIDs []string `json:"controlPlaneSubnetIDs,omitempty"` + // ControlPlaneSecurityGroupIDs configures the security groups for the control plane. + // +optional + ControlPlaneSecurityGroupIDs []string `json:"controlPlaneSecurityGroupIDs,omitempty"` } // ClusterSubnets holds private and public subnets ClusterSubnets struct { diff --git a/pkg/apis/eksctl.io/v1alpha5/zz_generated.deepcopy.go b/pkg/apis/eksctl.io/v1alpha5/zz_generated.deepcopy.go index 90ec1476ea..40c5a8092d 100644 --- a/pkg/apis/eksctl.io/v1alpha5/zz_generated.deepcopy.go +++ b/pkg/apis/eksctl.io/v1alpha5/zz_generated.deepcopy.go @@ -724,6 +724,16 @@ func (in *ClusterVPC) DeepCopyInto(out *ClusterVPC) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ControlPlaneSubnetIDs != nil { + in, out := &in.ControlPlaneSubnetIDs, &out.ControlPlaneSubnetIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ControlPlaneSecurityGroupIDs != nil { + in, out := &in.ControlPlaneSecurityGroupIDs, &out.ControlPlaneSecurityGroupIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/pkg/cfn/builder/cluster.go b/pkg/cfn/builder/cluster.go index aaf0614639..a60ff70017 100644 --- a/pkg/cfn/builder/cluster.go +++ b/pkg/cfn/builder/cluster.go @@ -27,7 +27,7 @@ type ClusterResourceSet struct { ec2API awsapi.EC2 region string vpcResourceSet VPCResourceSet - securityGroups []*gfnt.Value + securityGroups *gfnt.Value } // NewClusterResourceSet returns a resource set for the new cluster. @@ -115,7 +115,13 @@ func (c *ClusterResourceSet) AddAllResources(ctx context.Context) error { func (c *ClusterResourceSet) addResourcesForSecurityGroups(vpcID *gfnt.Value) *clusterSecurityGroup { var refControlPlaneSG, refClusterSharedNodeSG *gfnt.Value - if c.spec.VPC.SecurityGroup == "" { + if sg := c.spec.VPC.SecurityGroup; sg != "" { + refControlPlaneSG = gfnt.NewString(sg) + c.securityGroups = gfnt.NewStringSlice(sg) + } else if securityGroupIDs := c.spec.VPC.ControlPlaneSecurityGroupIDs; len(securityGroupIDs) > 0 { + refControlPlaneSG = gfnt.NewString(securityGroupIDs[0]) + c.securityGroups = gfnt.NewStringSlice(securityGroupIDs...) 
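+			// The first user-supplied security group doubles as the control plane
+			// SG reference; the full list is what gets passed to the EKS API via
+			// c.securityGroups.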
+ } else { refControlPlaneSG = c.newResource(cfnControlPlaneSGResource, &gfnec2.SecurityGroup{ GroupDescription: gfnt.NewString("Communication between the control plane and worker nodegroups"), VpcId: vpcID, @@ -146,10 +152,8 @@ func (c *ClusterResourceSet) addResourcesForSecurityGroups(vpcID *gfnt.Value) *c }) } } - } else { - refControlPlaneSG = gfnt.NewString(c.spec.VPC.SecurityGroup) + c.securityGroups = gfnt.NewSlice(refControlPlaneSG) } - c.securityGroups = []*gfnt.Value{refControlPlaneSG} // only this one SG is passed to EKS API, nodes are isolated if c.spec.VPC.SharedNodeSecurityGroup == "" { refClusterSharedNodeSG = c.newResource(cfnSharedNodeSGResource, &gfnec2.SecurityGroup{ @@ -263,12 +267,16 @@ func (c *ClusterResourceSet) newResource(name string, resource gfn.Resource) *gf func (c *ClusterResourceSet) addResourcesForControlPlane(subnetDetails *SubnetDetails) { clusterVPC := &gfneks.Cluster_ResourcesVpcConfig{ - SubnetIds: gfnt.NewSlice(subnetDetails.ControlPlaneSubnetRefs()...), EndpointPublicAccess: gfnt.NewBoolean(*c.spec.VPC.ClusterEndpoints.PublicAccess), EndpointPrivateAccess: gfnt.NewBoolean(*c.spec.VPC.ClusterEndpoints.PrivateAccess), - SecurityGroupIds: gfnt.NewSlice(c.securityGroups...), + SecurityGroupIds: c.securityGroups, PublicAccessCidrs: gfnt.NewStringSlice(c.spec.VPC.PublicAccessCIDRs...), } + if subnetIDs := c.spec.VPC.ControlPlaneSubnetIDs; len(subnetIDs) > 0 { + clusterVPC.SubnetIds = gfnt.NewStringSlice(subnetIDs...) + } else { + clusterVPC.SubnetIds = gfnt.NewSlice(subnetDetails.ControlPlaneSubnetRefs()...) + } serviceRoleARN := gfnt.MakeFnGetAttString("ServiceRole", "Arn") if api.IsSetAndNonEmptyString(c.spec.IAM.ServiceRoleARN) { diff --git a/pkg/ctl/cmdutils/update_cluster_vpc.go b/pkg/ctl/cmdutils/update_cluster_vpc.go new file mode 100644 index 0000000000..13d546ea44 --- /dev/null +++ b/pkg/ctl/cmdutils/update_cluster_vpc.go @@ -0,0 +1,89 @@ +package cmdutils + +import ( + "fmt" + "strings" + + "github.com/kris-nova/logger" + + api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" +) + +// UpdateClusterVPCOptions holds the options for updating the VPC config. +type UpdateClusterVPCOptions struct { + // PrivateAccess configures access for the private endpoint. + PrivateAccess bool + // PublicAccess configures access for the public endpoint. + PublicAccess bool + // PublicAccessCIDRs configures the public access CIDRs. + PublicAccessCIDRs []string + // ControlPlaneSubnetIDs configures the subnets for the control plane. + ControlPlaneSubnetIDs []string + // ControlPlaneSecurityGroupIDs configures the security group IDs for the control plane. + ControlPlaneSecurityGroupIDs []string +} + +// NewUpdateClusterVPCLoader will load config or use flags for 'eksctl utils update-cluster-vpc-config'. +func NewUpdateClusterVPCLoader(cmd *Cmd, options UpdateClusterVPCOptions) ClusterConfigLoader { + l := newCommonClusterConfigLoader(cmd) + + supportedOptions := []string{ + "private-access", + "public-access", + "public-access-cidrs", + "control-plane-subnet-ids", + "control-plane-security-group-ids", + } + + l.flagsIncompatibleWithConfigFile.Insert(supportedOptions...) 
+ + l.validateWithoutConfigFile = func() error { + if err := l.validateMetadataWithoutConfigFile(); err != nil { + return err + } + hasRequiredOptions := false + for _, option := range supportedOptions { + if flag := l.CobraCommand.Flag(option); flag != nil && flag.Changed { + hasRequiredOptions = true + break + } + } + if !hasRequiredOptions { + options := make([]string, 0, len(supportedOptions)) + for _, o := range supportedOptions { + options = append(options, "--"+o) + } + return fmt.Errorf("at least one of these options must be specified: %s", strings.Join(options, ", ")) + } + clusterConfig := cmd.ClusterConfig + if flag := l.CobraCommand.Flag("private-access"); flag != nil && flag.Changed { + clusterConfig.VPC.ClusterEndpoints = &api.ClusterEndpoints{ + PrivateAccess: &options.PrivateAccess, + } + } + if flag := l.CobraCommand.Flag("public-access"); flag != nil && flag.Changed { + if clusterConfig.VPC.ClusterEndpoints == nil { + clusterConfig.VPC.ClusterEndpoints = &api.ClusterEndpoints{ + PublicAccess: &options.PublicAccess, + } + } else { + clusterConfig.VPC.ClusterEndpoints.PublicAccess = &options.PublicAccess + } + } + clusterConfig.VPC.PublicAccessCIDRs = options.PublicAccessCIDRs + clusterConfig.VPC.ControlPlaneSubnetIDs = options.ControlPlaneSubnetIDs + clusterConfig.VPC.ControlPlaneSecurityGroupIDs = options.ControlPlaneSecurityGroupIDs + return nil + } + + l.validateWithConfigFile = func() error { + logger.Info("only changes to vpc.clusterEndpoints, vpc.publicAccessCIDRs, vpc.controlPlaneSubnetIDs and vpc.controlPlaneSecurityGroupIDs are updated in the EKS API, changes to any other fields will be ignored") + if l.ClusterConfig.VPC == nil { + l.ClusterConfig.VPC = api.NewClusterVPC(false) + } + api.SetClusterEndpointAccessDefaults(l.ClusterConfig.VPC) + return nil + } + + return l +} diff --git a/pkg/ctl/utils/mocks/VPCConfigUpdater.go b/pkg/ctl/utils/mocks/VPCConfigUpdater.go new file mode 100644 index 0000000000..6500898363 --- /dev/null +++ b/pkg/ctl/utils/mocks/VPCConfigUpdater.go @@ -0,0 +1,43 @@ +// Code generated by mockery v2.33.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + eks "github.com/aws/aws-sdk-go-v2/service/eks" + mock "github.com/stretchr/testify/mock" +) + +// VPCConfigUpdater is an autogenerated mock type for the VPCConfigUpdater type +type VPCConfigUpdater struct { + mock.Mock +} + +// UpdateClusterConfig provides a mock function with given fields: ctx, input +func (_m *VPCConfigUpdater) UpdateClusterConfig(ctx context.Context, input *eks.UpdateClusterConfigInput) error { + ret := _m.Called(ctx, input) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *eks.UpdateClusterConfigInput) error); ok { + r0 = rf(ctx, input) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewVPCConfigUpdater creates a new instance of VPCConfigUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewVPCConfigUpdater(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *VPCConfigUpdater {
+	mock := &VPCConfigUpdater{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/pkg/ctl/utils/nodegroup_health.go b/pkg/ctl/utils/nodegroup_health.go
index c3c91a4ede..1328d685db 100644
--- a/pkg/ctl/utils/nodegroup_health.go
+++ b/pkg/ctl/utils/nodegroup_health.go
@@ -64,7 +64,7 @@ func getNodeGroupHealth(cmd *cmdutils.Cmd, nodeGroupName string) error {
 	}
 
 	if cfg.IsControlPlaneOnOutposts() {
-		return errUnsupportedLocalCluster
+		return api.ErrUnsupportedLocalCluster
 	}
 	stackCollection := manager.NewStackCollection(ctl.AWSProvider, cfg)
 	managedService := managed.NewService(ctl.AWSProvider.EKS(), ctl.AWSProvider.EC2(), stackCollection, cfg.Metadata.Name)
diff --git a/pkg/ctl/utils/set_public_access_cidrs.go b/pkg/ctl/utils/set_public_access_cidrs.go
index 42f6babab7..7154b6e8ec 100644
--- a/pkg/ctl/utils/set_public_access_cidrs.go
+++ b/pkg/ctl/utils/set_public_access_cidrs.go
@@ -4,10 +4,8 @@ import (
 	"context"
 
 	"github.com/kris-nova/logger"
-	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	"k8s.io/apimachinery/pkg/util/sets"
 
 	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
 	"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
@@ -17,6 +15,7 @@ func publicAccessCIDRsCmdWithHandler(cmd *cmdutils.Cmd, handler func(cmd *cmduti
 	cfg := api.NewClusterConfig()
 	cmd.ClusterConfig = cfg
 
+	cmd.CobraCommand.Deprecated = "this command is deprecated and will be removed soon. Use `eksctl utils update-cluster-vpc-config --public-access-cidrs=<>` instead."
 	cmd.SetDescription("set-public-access-cidrs", "Update public access CIDRs", "CIDR blocks that EKS uses to create a security group on the public endpoint")
 
 	cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error {
@@ -57,40 +56,14 @@ func doUpdatePublicAccessCIDRs(cmd *cmdutils.Cmd) error {
 		return err
 	}
 
-	clusterVPCConfig, err := ctl.GetCurrentClusterVPCConfig(ctx, cfg)
-	if err != nil {
-		return err
-	}
-
-	if cfg.IsControlPlaneOnOutposts() {
-		return errUnsupportedLocalCluster
-	}
-
-	logger.Info("current public access CIDRs: %v", clusterVPCConfig.PublicAccessCIDRs)
-
-	if cidrsEqual(clusterVPCConfig.PublicAccessCIDRs, cfg.VPC.PublicAccessCIDRs) {
-		logger.Success("Public Endpoint Restrictions for cluster %q in %q is already up to date",
-			meta.Name, meta.Region)
-		return nil
+	cfg.VPC.ClusterEndpoints = nil
+	cfg.VPC.ControlPlaneSubnetIDs = nil
+	cfg.VPC.ControlPlaneSecurityGroupIDs = nil
+	vpcHelper := &VPCHelper{
+		VPCUpdater:  ctl,
+		ClusterMeta: cfg.Metadata,
+		Cluster:     ctl.Status.ClusterInfo.Cluster,
+		PlanMode:    cmd.Plan,
 	}
-
-	cmdutils.LogIntendedAction(
-		cmd.Plan, "update Public Endpoint Restrictions for cluster %q in %q to: %v",
-		meta.Name, meta.Region, cfg.VPC.PublicAccessCIDRs)
-
-	if !cmd.Plan {
-		if err := ctl.UpdatePublicAccessCIDRs(ctx, cfg); err != nil {
-			return errors.Wrap(err, "error updating CIDRs for public access")
-		}
-		cmdutils.LogCompletedAction(
-			false,
-			"Public Endpoint Restrictions for cluster %q in %q have been updated to: %v",
-			meta.Name, meta.Region, cfg.VPC.PublicAccessCIDRs)
-	}
-	cmdutils.LogPlanModeWarning(cmd.Plan)
-	return nil
-}
-
-func cidrsEqual(currentValues, newValues []string) bool {
-	return sets.NewString(currentValues...).Equal(sets.NewString(newValues...))
+	return vpcHelper.UpdateClusterVPCConfig(ctx, cfg.VPC)
 }
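The deprecated commands above now funnel into the `VPCHelper` introduced later in this patch, which talks to EKS through the single-method `VPCConfigUpdater` interface mocked earlier. As a rough sketch of what a non-eksctl implementation of that interface could look like — the client construction here is an illustrative assumption, and unlike eksctl's `ClusterProvider` this version does not wait for the update to reach a terminal state:

```go
package vpcupdate

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/eks"
)

// directUpdater satisfies the patch's VPCConfigUpdater interface with a bare
// EKS client; it returns as soon as the update is accepted rather than
// polling for the update to succeed.
type directUpdater struct {
	eksAPI *eks.Client
}

func (d *directUpdater) UpdateClusterConfig(ctx context.Context, input *eks.UpdateClusterConfigInput) error {
	_, err := d.eksAPI.UpdateClusterConfig(ctx, input)
	return err
}

func newDirectUpdater(ctx context.Context) (*directUpdater, error) {
	// Load credentials and region from the default chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return &directUpdater{eksAPI: eks.NewFromConfig(cfg)}, nil
}
```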
diff --git a/pkg/ctl/utils/update_cluster_endpoint_access.go b/pkg/ctl/utils/update_cluster_endpoint_access.go
index c9efa9d69d..66d39ed649 100644
--- a/pkg/ctl/utils/update_cluster_endpoint_access.go
+++ b/pkg/ctl/utils/update_cluster_endpoint_access.go
@@ -11,17 +11,17 @@ import (
 	"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
 )
 
-var (
-	private bool
-	public  bool
-)
-
 func updateClusterEndpointsCmd(cmd *cmdutils.Cmd) {
 	cfg := api.NewClusterConfig()
 	cmd.ClusterConfig = cfg
 
+	cmd.CobraCommand.Deprecated = "this command is deprecated and will be removed soon. Use `eksctl utils update-cluster-vpc-config --public-access=<> --private-access=<>` instead."
 	cmd.SetDescription("update-cluster-endpoints", "Update Kubernetes API endpoint access configuration", "")
 
+	var (
+		private bool
+		public  bool
+	)
 	cmd.CobraCommand.RunE = func(_ *cobra.Command, _ []string) error {
 		return doUpdateClusterEndpoints(cmd, private, public)
 	}
@@ -57,69 +57,19 @@ func doUpdateClusterEndpoints(cmd *cmdutils.Cmd, newPrivate bool, newPublic bool
 	}
 	logger.Info("using region %s", meta.Region)
 
-	if cfg.IsControlPlaneOnOutposts() {
-		return errUnsupportedLocalCluster
-	}
-
 	if ok, err := ctl.CanUpdate(cfg); !ok {
 		return err
 	}
 
-	clusterVPCConfig, err := ctl.GetCurrentClusterVPCConfig(ctx, cfg)
-	if err != nil {
-		return err
-	}
-
-	curPrivate, curPublic := *clusterVPCConfig.ClusterEndpoints.PrivateAccess, *clusterVPCConfig.ClusterEndpoints.PublicAccess
-
-	logger.Info("current Kubernetes API endpoint access: privateAccess=%v, publicAccess=%v",
-		curPrivate, curPublic)
-
-	if cfg.VPC.ClusterEndpoints.PrivateAccess == nil {
-		newPrivate = curPrivate
-	} else {
-		newPrivate = *cfg.VPC.ClusterEndpoints.PrivateAccess
-	}
-	if cfg.VPC.ClusterEndpoints.PublicAccess == nil {
-		newPublic = curPublic
-	} else {
-		newPublic = *cfg.VPC.ClusterEndpoints.PublicAccess
-	}
-
-	// Nothing changed?
-	if newPrivate == curPrivate && newPublic == curPublic {
-		logger.Success("Kubernetes API endpoint access for cluster %q in %q is already up to date",
-			meta.Name, meta.Region)
-		return nil
-	}
-
-	cfg.VPC.ClusterEndpoints.PrivateAccess = &newPrivate
-	cfg.VPC.ClusterEndpoints.PublicAccess = &newPublic
-
-	cmdutils.LogIntendedAction(
-		cmd.Plan, "update Kubernetes API endpoint access for cluster %q in %q to: privateAccess=%v, publicAccess=%v",
-		meta.Name, meta.Region, newPrivate, newPublic)
-
-	if err := cfg.ValidateClusterEndpointConfig(); err != nil {
-		return err
-	}
-
-	// if it's a private only cluster warn the user
-	if api.PrivateOnly(cfg.VPC.ClusterEndpoints) {
-		logger.Warning(api.ErrClusterEndpointPrivateOnly.Error())
-	}
-
-	if !cmd.Plan {
-		if err := ctl.UpdateClusterConfigForEndpoints(ctx, cfg); err != nil {
-			return err
-		}
-		cmdutils.LogCompletedAction(
-			false,
-			"the Kubernetes API endpoint access for cluster %q in %q has been updated to: "+
-				"privateAccess=%v, publicAccess=%v",
-			meta.Name, meta.Region, newPrivate, newPublic)
+	cfg.VPC.PublicAccessCIDRs = nil
+	cfg.VPC.ControlPlaneSubnetIDs = nil
+	cfg.VPC.ControlPlaneSecurityGroupIDs = nil
+	vpcHelper := &VPCHelper{
+		VPCUpdater:  ctl,
+		ClusterMeta: cfg.Metadata,
+		Cluster:     ctl.Status.ClusterInfo.Cluster,
+		PlanMode:    cmd.Plan,
 	}
-	cmdutils.LogPlanModeWarning(cmd.Plan)
-	return nil
+	return vpcHelper.UpdateClusterVPCConfig(ctx, cfg.VPC)
 }
diff --git a/pkg/ctl/utils/update_cluster_vpc_config.go b/pkg/ctl/utils/update_cluster_vpc_config.go
new file mode 100644
index 0000000000..060a265bc5
--- /dev/null
+++ b/pkg/ctl/utils/update_cluster_vpc_config.go
@@ -0,0 +1,82 @@
+package utils
+
+import (
+	"context"
+
+	"github.com/kris-nova/logger"
+	"github.com/lithammer/dedent"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
+	"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+)
+
+func updateClusterVPCConfigWithHandler(cmd *cmdutils.Cmd, handler func(cmd *cmdutils.Cmd) error) {
+	cfg := api.NewClusterConfig()
+	cmd.ClusterConfig = cfg
+
+	cmd.SetDescription("update-cluster-vpc-config", "Update Kubernetes API endpoint access configuration, public access CIDRs, and control plane subnets and security groups",
+		dedent.Dedent(`Updates the Kubernetes API endpoint access configuration, public access CIDRs, and control plane subnets and security groups.
+
+		When a config file is passed, only changes to vpc.clusterEndpoints, vpc.publicAccessCIDRs, vpc.controlPlaneSubnetIDs and vpc.controlPlaneSecurityGroupIDs are updated in the EKS API.
+		Changes to any other fields are ignored.
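+
+		Changes are applied via the EKS UpdateClusterConfig API, and eksctl waits for each update to complete before returning.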
+ `), + ) + var options cmdutils.UpdateClusterVPCOptions + cmd.CobraCommand.RunE = func(_ *cobra.Command, args []string) error { + cmd.NameArg = cmdutils.GetNameArg(args) + if err := cmdutils.NewUpdateClusterVPCLoader(cmd, options).Load(); err != nil { + return err + } + return handler(cmd) + } + + cmd.FlagSetGroup.InFlagSet("General", func(fs *pflag.FlagSet) { + cmdutils.AddClusterFlag(fs, cfg.Metadata) + cmdutils.AddRegionFlag(fs, &cmd.ProviderConfig) + cmdutils.AddConfigFileFlag(fs, &cmd.ClusterConfigFile) + cmdutils.AddApproveFlag(fs, cmd) + }) + + cmd.FlagSetGroup.InFlagSet("Endpoint Access", func(fs *pflag.FlagSet) { + fs.BoolVar(&options.PrivateAccess, "private-access", false, "access for private (VPC) clients") + fs.BoolVar(&options.PublicAccess, "public-access", false, "access for public clients") + }) + cmd.FlagSetGroup.InFlagSet("Public Access CIDRs", func(fs *pflag.FlagSet) { + fs.StringSliceVar(&options.PublicAccessCIDRs, "public-access-cidrs", nil, "CIDR blocks that EKS uses to create a security group on the public endpoint") + }) + cmd.FlagSetGroup.InFlagSet("Control plane subnets and security groups", func(fs *pflag.FlagSet) { + fs.StringSliceVar(&options.ControlPlaneSubnetIDs, "control-plane-subnet-ids", nil, "Subnet IDs for the control plane") + fs.StringSliceVar(&options.ControlPlaneSecurityGroupIDs, "control-plane-security-group-ids", nil, "Security group IDs for the control plane") + }) + + cmdutils.AddCommonFlagsForAWS(cmd, &cmd.ProviderConfig, false) +} + +func updateClusterVPCConfigCmd(cmd *cmdutils.Cmd) { + updateClusterVPCConfigWithHandler(cmd, doUpdateClusterVPCConfig) +} + +func doUpdateClusterVPCConfig(cmd *cmdutils.Cmd) error { + ctx := context.Background() + ctl, err := cmd.NewProviderForExistingCluster(ctx) + if err != nil { + return err + } + cfg := cmd.ClusterConfig + logger.Info("using region %s", cfg.Metadata.Region) + + if ok, err := ctl.CanUpdate(cfg); !ok { + return err + } + + vpcHelper := &VPCHelper{ + VPCUpdater: ctl, + ClusterMeta: cfg.Metadata, + Cluster: ctl.Status.ClusterInfo.Cluster, + PlanMode: cmd.Plan, + } + + return vpcHelper.UpdateClusterVPCConfig(ctx, cfg.VPC) +} diff --git a/pkg/ctl/utils/update_cluster_vpc_config_test.go b/pkg/ctl/utils/update_cluster_vpc_config_test.go new file mode 100644 index 0000000000..3733b1c9fb --- /dev/null +++ b/pkg/ctl/utils/update_cluster_vpc_config_test.go @@ -0,0 +1,26 @@ +package utils + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +type updateClusterVPCEntry struct { + args []string + expectedErr string +} + +var _ = DescribeTable("invalid usage of update-cluster-vpc-config", func(e updateClusterVPCEntry) { + cmd := newMockCmd(append([]string{"update-cluster-vpc-config"}, e.args...)...) 
+	_, err := cmd.execute()
+	Expect(err).To(MatchError(ContainSubstring(e.expectedErr)))
+},
+	Entry("missing --cluster option", updateClusterVPCEntry{
+		expectedErr: "--cluster must be set",
+	}),
+
+	Entry("missing a required parameter", updateClusterVPCEntry{
+		args:        []string{"--cluster", "test"},
+		expectedErr: "at least one of these options must be specified: --private-access, --public-access, --public-access-cidrs, --control-plane-subnet-ids, --control-plane-security-group-ids",
+	}),
+)
diff --git a/pkg/ctl/utils/utils.go b/pkg/ctl/utils/utils.go
index df145d320f..764af2c896 100644
--- a/pkg/ctl/utils/utils.go
+++ b/pkg/ctl/utils/utils.go
@@ -1,15 +1,11 @@
 package utils
 
 import (
-	"errors"
-
 	"github.com/spf13/cobra"
 
 	"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
 )
 
-var errUnsupportedLocalCluster = errors.New("this operation is not supported on local clusters")
-
 // Command will create the `utils` commands
 func Command(flagGrouping *cmdutils.FlagGrouping) *cobra.Command {
 	verbCmd := cmdutils.NewVerbCmd("utils", "Various utils", "")
@@ -25,6 +21,7 @@ func Command(flagGrouping *cmdutils.FlagGrouping) *cobra.Command {
 	cmdutils.AddResourceCmd(flagGrouping, verbCmd, installWindowsVPCController)
 	cmdutils.AddResourceCmd(flagGrouping, verbCmd, updateClusterEndpointsCmd)
 	cmdutils.AddResourceCmd(flagGrouping, verbCmd, publicAccessCIDRsCmd)
+	cmdutils.AddResourceCmd(flagGrouping, verbCmd, updateClusterVPCConfigCmd)
 	cmdutils.AddResourceCmd(flagGrouping, verbCmd, enableSecretsEncryptionCmd)
 	cmdutils.AddResourceCmd(flagGrouping, verbCmd, schemaCmd)
 	cmdutils.AddResourceCmd(flagGrouping, verbCmd, nodeGroupHealthCmd)
diff --git a/pkg/ctl/utils/vpc_helper.go b/pkg/ctl/utils/vpc_helper.go
new file mode 100644
index 0000000000..2155d0d40f
--- /dev/null
+++ b/pkg/ctl/utils/vpc_helper.go
@@ -0,0 +1,180 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/kris-nova/logger"
+
+	"golang.org/x/exp/slices"
+
+	"github.com/aws/aws-sdk-go-v2/service/eks"
+	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
+
+	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
+	"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
+)
+
+// A VPCConfigUpdater updates a cluster's VPC config.
+type VPCConfigUpdater interface {
+	UpdateClusterConfig(ctx context.Context, input *eks.UpdateClusterConfigInput) error
+}
+
+// A VPCHelper is a helper for updating a cluster's VPC config.
+type VPCHelper struct {
+	// VPCUpdater updates the VPC config.
+	VPCUpdater VPCConfigUpdater
+	// ClusterMeta holds the cluster metadata.
+	ClusterMeta *api.ClusterMeta
+	// Cluster holds the current cluster state.
+	Cluster *ekstypes.Cluster
+	// PlanMode configures the plan mode.
+	PlanMode bool
+}
+
+// UpdateClusterVPCConfig updates the cluster endpoints, public access CIDRs, and control plane subnets and security groups.
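+// It issues a separate EKS UpdateClusterConfig request per group of changed
+// fields, since the API accepts only one kind of update per request.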
+func (v *VPCHelper) UpdateClusterVPCConfig(ctx context.Context, vpc *api.ClusterVPC) error { + if v.Cluster.OutpostConfig != nil { + return api.ErrUnsupportedLocalCluster + } + if vpc.ClusterEndpoints != nil { + if err := v.updateEndpointAccess(ctx, *vpc.ClusterEndpoints); err != nil { + return err + } + } + if vpc.PublicAccessCIDRs != nil { + if err := v.updatePublicAccessCIDRs(ctx, vpc); err != nil { + return err + } + } + if vpc.ControlPlaneSubnetIDs != nil || vpc.ControlPlaneSecurityGroupIDs != nil { + if err := v.updateSubnetsSecurityGroups(ctx, vpc); err != nil { + return err + } + } + cmdutils.LogPlanModeWarning(v.PlanMode) + return nil +} + +func (v *VPCHelper) updateSubnetsSecurityGroups(ctx context.Context, vpc *api.ClusterVPC) error { + current := v.Cluster.ResourcesVpcConfig + hasUpdate := false + vpcUpdate := &ekstypes.VpcConfigRequest{ + SubnetIds: current.SubnetIds, + SecurityGroupIds: current.SecurityGroupIds, + } + + compareValues := func(currentValues, newValues []string, resourceName string, updateFn func()) { + if !slices.Equal(currentValues, newValues) { + updateFn() + hasUpdate = true + cmdutils.LogIntendedAction(v.PlanMode, "update %s for cluster %q in %q to: %v", resourceName, + v.ClusterMeta.Name, v.ClusterMeta.Region, newValues) + } else { + logger.Success("%s for cluster %q in %q are already up-to-date", resourceName, v.ClusterMeta.Name, v.ClusterMeta.Region) + } + } + if vpc.ControlPlaneSubnetIDs != nil { + compareValues(current.SubnetIds, vpc.ControlPlaneSubnetIDs, "control plane subnet IDs", func() { + vpcUpdate.SubnetIds = vpc.ControlPlaneSubnetIDs + }) + } + + if vpc.ControlPlaneSecurityGroupIDs != nil { + compareValues(current.SecurityGroupIds, vpc.ControlPlaneSecurityGroupIDs, "control plane security group IDs", func() { + vpcUpdate.SecurityGroupIds = vpc.ControlPlaneSecurityGroupIDs + }) + } + + if v.PlanMode || !hasUpdate { + return nil + } + if err := v.updateVPCConfig(ctx, vpcUpdate); err != nil { + return err + } + cmdutils.LogCompletedAction(false, "control plane subnets and security groups for cluster %q in %q have been updated to: "+ + "controlPlaneSubnetIDs=%v, controlPlaneSecurityGroupIDs=%v", v.ClusterMeta.Name, v.ClusterMeta.Region, vpcUpdate.SubnetIds, vpcUpdate.SecurityGroupIds) + + return nil +} + +func (v *VPCHelper) updateEndpointAccess(ctx context.Context, desired api.ClusterEndpoints) error { + current := v.Cluster.ResourcesVpcConfig + if desired.PublicAccess == nil { + desired.PublicAccess = aws.Bool(current.EndpointPublicAccess) + } + if desired.PrivateAccess == nil { + desired.PrivateAccess = aws.Bool(current.EndpointPrivateAccess) + } + if *desired.PublicAccess == current.EndpointPublicAccess && *desired.PrivateAccess == current.EndpointPrivateAccess { + logger.Success("Kubernetes API endpoint access for cluster %q in %q is already up-to-date", + v.ClusterMeta.Name, v.ClusterMeta.Region) + return nil + } + + cmdutils.LogIntendedAction( + v.PlanMode, "update Kubernetes API endpoint access for cluster %q in %q to: privateAccess=%v, publicAccess=%v", + v.ClusterMeta.Name, v.ClusterMeta.Region, *desired.PrivateAccess, *desired.PublicAccess) + if api.PrivateOnly(&desired) { + logger.Warning(api.ErrClusterEndpointPrivateOnly.Error()) + } + if v.PlanMode { + return nil + } + endpointUpdate := &ekstypes.VpcConfigRequest{ + EndpointPrivateAccess: desired.PrivateAccess, + EndpointPublicAccess: desired.PublicAccess, + } + if err := v.updateVPCConfig(ctx, endpointUpdate); err != nil { + return err + } + cmdutils.LogCompletedAction( + false, + 
"Kubernetes API endpoint access for cluster %q in %q has been updated to: "+ + "privateAccess=%v, publicAccess=%v", + v.ClusterMeta.Name, v.ClusterMeta.Region, *desired.PrivateAccess, *desired.PublicAccess) + return nil +} + +func (v *VPCHelper) updatePublicAccessCIDRs(ctx context.Context, vpc *api.ClusterVPC) error { + if cidrsEqual(v.Cluster.ResourcesVpcConfig.PublicAccessCidrs, vpc.PublicAccessCIDRs) { + logger.Success("public access CIDRs for cluster %q in %q are already up-to-date", + v.ClusterMeta.Name, v.ClusterMeta.Region) + return nil + } + + logger.Info("current public access CIDRs: %v", v.Cluster.ResourcesVpcConfig.PublicAccessCidrs) + cmdutils.LogIntendedAction( + v.PlanMode, "update public access CIDRs for cluster %q in %q to: %v", + v.ClusterMeta.Name, v.ClusterMeta.Region, vpc.PublicAccessCIDRs) + + if v.PlanMode { + return nil + } + + if err := v.updateVPCConfig(ctx, &ekstypes.VpcConfigRequest{ + PublicAccessCidrs: vpc.PublicAccessCIDRs, + }); err != nil { + return fmt.Errorf("error updating CIDRs for public access: %w", err) + } + cmdutils.LogCompletedAction( + false, + "public access CIDRs for cluster %q in %q have been updated to: %v", + v.ClusterMeta.Name, v.ClusterMeta.Region, vpc.PublicAccessCIDRs) + return nil +} + +func (v *VPCHelper) updateVPCConfig(ctx context.Context, vpcConfig *ekstypes.VpcConfigRequest) error { + return v.VPCUpdater.UpdateClusterConfig(ctx, &eks.UpdateClusterConfigInput{ + Name: v.Cluster.Name, + ResourcesVpcConfig: vpcConfig, + }) +} + +func cidrsEqual(currentValues, newValues []string) bool { + if len(newValues) == 0 && len(currentValues) == 1 && currentValues[0] == "0.0.0.0/0" { + return true + } + return slices.Equal(currentValues, newValues) +} diff --git a/pkg/ctl/utils/vpc_helper_test.go b/pkg/ctl/utils/vpc_helper_test.go new file mode 100644 index 0000000000..cbcca769b4 --- /dev/null +++ b/pkg/ctl/utils/vpc_helper_test.go @@ -0,0 +1,344 @@ +package utils_test + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/weaveworks/eksctl/pkg/ctl/utils/mocks" + + api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" + + "github.com/weaveworks/eksctl/pkg/ctl/utils" +) + +type vpcHelperEntry struct { + clusterVPC *ekstypes.VpcConfigResponse + vpc *api.ClusterVPC + outposts bool + planMode bool + + expectedUpdates []*eks.UpdateClusterConfigInput + expectedErr string +} + +var _ = DescribeTable("VPCHelper", func(e vpcHelperEntry) { + const updateClusterConfigMethodName = "UpdateClusterConfig" + var vpcUpdater mocks.VPCConfigUpdater + vpcUpdater.On(updateClusterConfigMethodName, mock.Anything, mock.Anything).Return(nil) + + clusterMeta := &api.ClusterMeta{ + Name: "test", + } + cluster := &ekstypes.Cluster{ + Name: aws.String(clusterMeta.Name), + ResourcesVpcConfig: e.clusterVPC, + } + if e.outposts { + cluster.OutpostConfig = &ekstypes.OutpostConfigResponse{ + OutpostArns: []string{"arn:aws:outposts:us-west-2:1234:outpost/op-1234"}, + } + } + vpcHelper := &utils.VPCHelper{ + VPCUpdater: &vpcUpdater, + ClusterMeta: clusterMeta, + Cluster: cluster, + PlanMode: e.planMode, + } + err := vpcHelper.UpdateClusterVPCConfig(context.Background(), e.vpc) + if e.expectedErr != "" { + Expect(err).To(MatchError(ContainSubstring(e.expectedErr))) + } else { + Expect(err).NotTo(HaveOccurred()) + } + vpcUpdater.AssertNumberOfCalls(GinkgoT(), updateClusterConfigMethodName, len(e.expectedUpdates)) + for _, u := range e.expectedUpdates { + vpcUpdater.AssertCalled(GinkgoT(), updateClusterConfigMethodName, mock.Anything, u) + } +}, + Entry("cluster matches default config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{}, + }), + + Entry("cluster matches desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Disabled(), + }, + }, + }), + + Entry("cluster endpoint access does not match desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Enabled(), + }, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + EndpointPublicAccess: api.Enabled(), + EndpointPrivateAccess: api.Enabled(), + }, + }, + }, + }), + + Entry("cluster public access CIDRs do not match desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Disabled(), + }, + PublicAccessCIDRs: []string{"1.1.1.1/32", "2.2.2.2/32"}, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + PublicAccessCidrs: []string{"1.1.1.1/32", "2.2.2.2/32"}, + }, + }, + }, + }), + + Entry("both cluster endpoint access and public access CIDRs do not match desired config", vpcHelperEntry{ + 
clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Disabled(), + PrivateAccess: api.Enabled(), + }, + PublicAccessCIDRs: []string{"1.1.1.1/32", "2.2.2.2/32"}, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + EndpointPublicAccess: api.Disabled(), + EndpointPrivateAccess: api.Enabled(), + }, + }, + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + PublicAccessCidrs: []string{"1.1.1.1/32", "2.2.2.2/32"}, + }, + }, + }, + }), + + Entry("cluster does not match desired config but in plan mode", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Disabled(), + PrivateAccess: api.Enabled(), + }, + PublicAccessCIDRs: []string{"1.1.1.1/32", "2.2.2.2/32"}, + }, + planMode: true, + }), + + Entry("updating an Outpost cluster fails", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Disabled(), + PrivateAccess: api.Enabled(), + }, + PublicAccessCIDRs: []string{"1.1.1.1/32", "2.2.2.2/32"}, + }, + outposts: true, + + expectedErr: "this operation is not supported on Outposts clusters", + }), + + Entry("cluster matches desired config when subnets and security groups are specified", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + SecurityGroupIds: []string{"sg-1234"}, + SubnetIds: []string{"subnet-1234"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Disabled(), + }, + ControlPlaneSecurityGroupIDs: []string{"sg-1234"}, + ControlPlaneSubnetIDs: []string{"subnet-1234"}, + }, + }), + + Entry("cluster security groups do not match desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + SecurityGroupIds: []string{"sg-1234"}, + SubnetIds: []string{"subnet-1234"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Disabled(), + }, + ControlPlaneSecurityGroupIDs: []string{"sg-1234", "sg-5678"}, + ControlPlaneSubnetIDs: []string{"subnet-1234"}, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + SecurityGroupIds: []string{"sg-1234", "sg-5678"}, + SubnetIds: []string{"subnet-1234"}, + }, + }, + }, + }), + + Entry("cluster subnets do not match desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + SecurityGroupIds: []string{"sg-1234", "sg-5678"}, + SubnetIds: []string{"subnet-1234"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: 
api.Disabled(), + }, + ControlPlaneSecurityGroupIDs: []string{"sg-1234", "sg-5678"}, + ControlPlaneSubnetIDs: []string{"subnet-1234", "subnet-5678"}, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + SecurityGroupIds: []string{"sg-1234", "sg-5678"}, + SubnetIds: []string{"subnet-1234", "subnet-5678"}, + }, + }, + }, + }), + + Entry("cluster security group and subnets do not match desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: true, + EndpointPrivateAccess: false, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + SecurityGroupIds: []string{"sg-1234", "sg-5678"}, + SubnetIds: []string{"subnet-1234"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Disabled(), + }, + ControlPlaneSecurityGroupIDs: []string{"sg-1234", "sg-5678"}, + ControlPlaneSubnetIDs: []string{"subnet-1234", "subnet-5678"}, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + SecurityGroupIds: []string{"sg-1234", "sg-5678"}, + SubnetIds: []string{"subnet-1234", "subnet-5678"}, + }, + }, + }, + }), + + Entry("no fields match desired config", vpcHelperEntry{ + clusterVPC: &ekstypes.VpcConfigResponse{ + EndpointPublicAccess: false, + EndpointPrivateAccess: true, + PublicAccessCidrs: []string{"0.0.0.0/0"}, + SecurityGroupIds: []string{"sg-1234"}, + SubnetIds: []string{"subnet-1234"}, + }, + vpc: &api.ClusterVPC{ + ClusterEndpoints: &api.ClusterEndpoints{ + PublicAccess: api.Enabled(), + PrivateAccess: api.Disabled(), + }, + PublicAccessCIDRs: []string{"1.1.1.1/1"}, + ControlPlaneSecurityGroupIDs: []string{"sg-1234", "sg-5678"}, + ControlPlaneSubnetIDs: []string{"subnet-1234", "subnet-5678"}, + }, + + expectedUpdates: []*eks.UpdateClusterConfigInput{ + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + EndpointPublicAccess: api.Enabled(), + EndpointPrivateAccess: api.Disabled(), + }, + }, + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + PublicAccessCidrs: []string{"1.1.1.1/1"}, + }, + }, + { + Name: aws.String("test"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + SecurityGroupIds: []string{"sg-1234", "sg-5678"}, + SubnetIds: []string{"subnet-1234", "subnet-5678"}, + }, + }, + }, + }), +) diff --git a/pkg/eks/mocks/ConfigProvider.go b/pkg/eks/mocks/ConfigProvider.go index 5e809b75b7..4d355e02d5 100644 --- a/pkg/eks/mocks/ConfigProvider.go +++ b/pkg/eks/mocks/ConfigProvider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocks diff --git a/pkg/eks/mocks/KubeNodeGroup.go b/pkg/eks/mocks/KubeNodeGroup.go index 38f1266e8d..838077b6c1 100644 --- a/pkg/eks/mocks/KubeNodeGroup.go +++ b/pkg/eks/mocks/KubeNodeGroup.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocks diff --git a/pkg/eks/mocksv2/ASG.go b/pkg/eks/mocksv2/ASG.go index a349b2c35c..7e0e7517a0 100644 --- a/pkg/eks/mocksv2/ASG.go +++ b/pkg/eks/mocksv2/ASG.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. 
package mocksv2 diff --git a/pkg/eks/mocksv2/CloudFormation.go b/pkg/eks/mocksv2/CloudFormation.go index 2fedae3a33..c166d54efd 100644 --- a/pkg/eks/mocksv2/CloudFormation.go +++ b/pkg/eks/mocksv2/CloudFormation.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/CloudTrail.go b/pkg/eks/mocksv2/CloudTrail.go index e42d9037fe..b36b3eb52e 100644 --- a/pkg/eks/mocksv2/CloudTrail.go +++ b/pkg/eks/mocksv2/CloudTrail.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/CloudWatchLogs.go b/pkg/eks/mocksv2/CloudWatchLogs.go index 2704574faf..eb560e8951 100644 --- a/pkg/eks/mocksv2/CloudWatchLogs.go +++ b/pkg/eks/mocksv2/CloudWatchLogs.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/CredentialsProvider.go b/pkg/eks/mocksv2/CredentialsProvider.go index e90646c8a4..ecfad51e3a 100644 --- a/pkg/eks/mocksv2/CredentialsProvider.go +++ b/pkg/eks/mocksv2/CredentialsProvider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/EC2.go b/pkg/eks/mocksv2/EC2.go index 2da8e309f6..c3d7bf155a 100644 --- a/pkg/eks/mocksv2/EC2.go +++ b/pkg/eks/mocksv2/EC2.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/EKS.go b/pkg/eks/mocksv2/EKS.go index ac9e59ea7f..621beea3ab 100644 --- a/pkg/eks/mocksv2/EKS.go +++ b/pkg/eks/mocksv2/EKS.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/ELB.go b/pkg/eks/mocksv2/ELB.go index 83d8a2da97..b931191739 100644 --- a/pkg/eks/mocksv2/ELB.go +++ b/pkg/eks/mocksv2/ELB.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/ELBV2.go b/pkg/eks/mocksv2/ELBV2.go index 02fa44db67..0a0a3d0bb9 100644 --- a/pkg/eks/mocksv2/ELBV2.go +++ b/pkg/eks/mocksv2/ELBV2.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/IAM.go b/pkg/eks/mocksv2/IAM.go index d335605596..b631faf946 100644 --- a/pkg/eks/mocksv2/IAM.go +++ b/pkg/eks/mocksv2/IAM.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/Outposts.go b/pkg/eks/mocksv2/Outposts.go index 76c2509631..63653daa51 100644 --- a/pkg/eks/mocksv2/Outposts.go +++ b/pkg/eks/mocksv2/Outposts.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/mocksv2/SSM.go b/pkg/eks/mocksv2/SSM.go index 3d3f15d52a..e84a86c534 100644 --- a/pkg/eks/mocksv2/SSM.go +++ b/pkg/eks/mocksv2/SSM.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. 
package mocksv2 diff --git a/pkg/eks/mocksv2/STS.go b/pkg/eks/mocksv2/STS.go index 98f66be61c..dbb2684aec 100644 --- a/pkg/eks/mocksv2/STS.go +++ b/pkg/eks/mocksv2/STS.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.32.2. DO NOT EDIT. +// Code generated by mockery v2.33.1. DO NOT EDIT. package mocksv2 diff --git a/pkg/eks/update.go b/pkg/eks/update.go index 8a8594d9a5..d482d515b3 100644 --- a/pkg/eks/update.go +++ b/pkg/eks/update.go @@ -83,12 +83,7 @@ func (c *ClusterProvider) UpdateClusterConfigForLogging(ctx context.Context, cfg }, }, } - - output, err := c.AWSProvider.EKS().UpdateClusterConfig(ctx, input) - if err != nil { - return err - } - if err := c.waitForUpdateToSucceed(ctx, cfg.Metadata.Name, output.Update); err != nil { + if err := c.UpdateClusterConfig(ctx, input); err != nil { return err } @@ -147,12 +142,7 @@ func (c *ClusterProvider) UpdateClusterConfigForEndpoints(ctx context.Context, c }, } - output, err := c.AWSProvider.EKS().UpdateClusterConfig(ctx, input) - if err != nil { - return err - } - - return c.waitForUpdateToSucceed(ctx, cfg.Metadata.Name, output.Update) + return c.UpdateClusterConfig(ctx, input) } // UpdatePublicAccessCIDRs calls eks.UpdateClusterConfig and updates the CIDRs for public access @@ -163,11 +153,16 @@ func (c *ClusterProvider) UpdatePublicAccessCIDRs(ctx context.Context, clusterCo PublicAccessCidrs: clusterConfig.VPC.PublicAccessCIDRs, }, } + return c.UpdateClusterConfig(ctx, input) +} + +// UpdateClusterConfig calls EKS.UpdateClusterConfig and waits for the update to complete. +func (c *ClusterProvider) UpdateClusterConfig(ctx context.Context, input *eks.UpdateClusterConfigInput) error { output, err := c.AWSProvider.EKS().UpdateClusterConfig(ctx, input) if err != nil { return err } - return c.waitForUpdateToSucceed(ctx, clusterConfig.Metadata.Name, output.Update) + return c.waitForUpdateToSucceed(ctx, *input.Name, output.Update) } // EnableKMSEncryption enables KMS encryption for the specified cluster diff --git a/pkg/version/release.go b/pkg/version/release.go index 82dcd93066..8bc96eeca1 100644 --- a/pkg/version/release.go +++ b/pkg/version/release.go @@ -3,7 +3,7 @@ package version // This file was generated by release_generate.go; DO NOT EDIT. // Version is the version number in semver format X.Y.Z -var Version = "0.164.0" +var Version = "0.165.0" // PreReleaseID can be empty for releases, "rc.X" for release candidates and "dev" for snapshots var PreReleaseID = "dev" diff --git a/userdocs/mkdocs.yml b/userdocs/mkdocs.yml index 66ac005bfa..6ce84d4a44 100644 --- a/userdocs/mkdocs.yml +++ b/userdocs/mkdocs.yml @@ -179,6 +179,7 @@ nav: - usage/vpc-configuration.md - usage/vpc-subnet-settings.md - usage/vpc-cluster-access.md + - usage/cluster-subnets-security-groups.md - usage/vpc-ip-family.md - IAM: - usage/minimum-iam-policies.md diff --git a/userdocs/src/getting-started.md b/userdocs/src/getting-started.md index 8c055fa2d5..fefb363dfa 100644 --- a/userdocs/src/getting-started.md +++ b/userdocs/src/getting-started.md @@ -1,6 +1,8 @@ # Getting started !!! tip "New for 2023" + `eksctl` now supports [updating the subnets and security groups](/usage/cluster-subnets-security-groups) associated with the EKS control plane. + `eksctl` now supports creating fully private clusters on [AWS Outposts](/usage/outposts). `eksctl` now supports new ISO regions `us-iso-east-1` and `us-isob-east-1`. 
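The documentation added below shows the CLI and config-file surface for `update-cluster-vpc-config`; under the hood, both paths reduce to the `UpdateClusterConfig` EKS API call made in `vpc_helper.go` above. A minimal, self-contained sketch of that call follows — the cluster name and resource IDs are illustrative placeholders, and a real caller should also wait for the update to complete, as eksctl does:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/eks"
	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := eks.NewFromConfig(cfg)

	// Roughly equivalent to:
	//   eksctl utils update-cluster-vpc-config --cluster=my-cluster \
	//     --control-plane-subnet-ids=subnet-1234,subnet-5678 \
	//     --control-plane-security-group-ids=sg-1234 --approve
	if _, err := client.UpdateClusterConfig(ctx, &eks.UpdateClusterConfigInput{
		Name: aws.String("my-cluster"),
		ResourcesVpcConfig: &ekstypes.VpcConfigRequest{
			SubnetIds:        []string{"subnet-1234", "subnet-5678"},
			SecurityGroupIds: []string{"sg-1234"},
		},
	}); err != nil {
		log.Fatal(err)
	}
}
```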
diff --git a/userdocs/src/usage/cluster-subnets-security-groups.md b/userdocs/src/usage/cluster-subnets-security-groups.md
new file mode 100644
index 0000000000..70f55d7d89
--- /dev/null
+++ b/userdocs/src/usage/cluster-subnets-security-groups.md
@@ -0,0 +1,83 @@
+# Updating control plane subnets and security groups
+
+## Updating control plane subnets
+When a cluster is created with eksctl, a set of public and private subnets is created and passed to the EKS API.
+EKS creates 2 to 4 cross-account elastic network interfaces (ENIs) in those subnets to enable communication between the EKS
+managed Kubernetes control plane and your VPC.
+
+To update the subnets used by the EKS control plane, run:
+
+```console
+eksctl utils update-cluster-vpc-config --cluster=<cluster> --control-plane-subnet-ids=subnet-1234,subnet-5678
+```
+
+To update the setting using a config file:
+
+```yaml
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+  name: cluster
+  region: us-west-2
+
+vpc:
+  controlPlaneSubnetIDs: [subnet-1234, subnet-5678]
+```
+
+```console
+eksctl utils update-cluster-vpc-config -f config.yaml
+```
+
+Without the `--approve` flag, eksctl only logs the proposed changes. Once you are satisfied with them, rerun the command
+with the `--approve` flag.
+
+## Updating control plane security groups
+To manage traffic between the control plane and worker nodes, EKS supports passing additional security groups that are applied to the cross-account network interfaces
+provisioned by EKS. To update the security groups for the EKS control plane, run:
+
+```console
+eksctl utils update-cluster-vpc-config --cluster=<cluster> --control-plane-security-group-ids=sg-1234,sg-5678 --approve
+```
+
+To update the setting using a config file:
+
+```yaml
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+  name: cluster
+  region: us-west-2
+
+vpc:
+  controlPlaneSecurityGroupIDs: [sg-1234, sg-5678]
+```
+
+```console
+eksctl utils update-cluster-vpc-config -f config.yaml
+```
+
+To update both control plane subnets and security groups for a cluster, run:
+
+```console
+eksctl utils update-cluster-vpc-config --cluster=<cluster> --control-plane-subnet-ids=<> --control-plane-security-group-ids=<> --approve
+```
+
+To update both fields using a config file:
+
+```yaml
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+  name: cluster
+  region: us-west-2
+
+vpc:
+  controlPlaneSubnetIDs: [subnet-1234, subnet-5678]
+  controlPlaneSecurityGroupIDs: [sg-1234, sg-5678]
+```
+
+```console
+eksctl utils update-cluster-vpc-config -f config.yaml
+```
+
+For a complete example, refer to [cluster-subnets-sgs.yaml](https://github.com/eksctl-io/eksctl/blob/main/examples/38-cluster-subnets-sgs.yaml).
\ No newline at end of file
diff --git a/userdocs/src/usage/vpc-cluster-access.md b/userdocs/src/usage/vpc-cluster-access.md
index 128757c2e7..2a9fec28dc 100644
--- a/userdocs/src/usage/vpc-cluster-access.md
+++ b/userdocs/src/usage/vpc-cluster-access.md
@@ -33,14 +33,18 @@ There are some additional caveats when configuring Kubernetes API endpoint acces
 
 The following is an example of how one could configure the Kubernetes API endpoint access using the `utils` sub-command:
 
+```console
+eksctl utils update-cluster-vpc-config --cluster=<cluster> --private-access=true --public-access=false
 ```
-eksctl utils update-cluster-endpoints --name=<cluster> --private-access=true --public-access=false
-```
+
+!!! warning
+    `eksctl utils update-cluster-endpoints` has been deprecated in favour of `eksctl utils update-cluster-vpc-config`
+    and will be removed soon.
 
 To update the setting using a `ClusterConfig` file, use:
 
 ```console
-eksctl utils update-cluster-endpoints -f config.yaml --approve
+eksctl utils update-cluster-vpc-config -f config.yaml --approve
 ```
 
 Note that any flag you don't pass keeps its current value. Once you are satisfied with the proposed changes,
@@ -59,13 +63,17 @@ vpc:
 To update the restrictions on an existing cluster, use:
 
 ```console
-eksctl utils set-public-access-cidrs --cluster=<cluster> 1.1.1.1/32,2.2.2.0/24
+eksctl utils update-cluster-vpc-config --cluster=<cluster> --public-access-cidrs=1.1.1.1/32,2.2.2.0/24
 ```
 
+!!! warning
+    `eksctl utils set-public-access-cidrs` has been deprecated in favour of `eksctl utils update-cluster-vpc-config`
+    and will be removed soon.
+
 To update the restrictions using a `ClusterConfig` file, set the new CIDRs in `vpc.publicAccessCIDRs` and run:
 
 ```console
-eksctl utils set-public-access-cidrs -f config.yaml
+eksctl utils update-cluster-vpc-config -f config.yaml
 ```
 
 !!! warning
@@ -81,3 +89,24 @@ eksctl utils set-public-access-cidrs -f config.yaml
     the internet.
     (Source: https://github.com/aws/containers-roadmap/issues/108#issuecomment-552766489)
     Implementation notes: https://github.com/aws/containers-roadmap/issues/108#issuecomment-552698875
+
+
+To update both API server endpoint access and public access CIDRs for a cluster in a single command, run:
+
+```console
+eksctl utils update-cluster-vpc-config --cluster=<cluster> --public-access=true --private-access=true --public-access-cidrs=1.1.1.1/32,2.2.2.0/24
+```
+
+To update the setting using a config file:
+
+```yaml
+vpc:
+  clusterEndpoints:
+    publicAccess: <true|false>
+    privateAccess: <true|false>
+    publicAccessCIDRs: ["1.1.1.1/32"]
+```
+
+```console
+eksctl utils update-cluster-vpc-config -f config.yaml
+```

From d8a70a07dfde9fb11e7bdcb4e448b0e914db54a7 Mon Sep 17 00:00:00 2001
From: Tibi <110664232+TiberiuGC@users.noreply.github.com>
Date: Wed, 25 Oct 2023 21:41:54 +0300
Subject: [PATCH 03/10] fix IAM service account

---
 integration/tests/crud/creategetdelete_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go
index 24aa2a66cd..c76a09bb22 100644
--- a/integration/tests/crud/creategetdelete_test.go
+++ b/integration/tests/crud/creategetdelete_test.go
@@ -346,7 +346,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() {
 			Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + test.Namespace + "-s3-reader"))
 			Expect(awsConfig).To(HaveExistingStack(stackNamePrefix + "app1-app-cache-access"))
 
-			sa, err := clientSet.CoreV1().ServiceAccounts(metav1.NamespaceDefault).Get(context.TODO(), "s3-reader", metav1.GetOptions{})
+			sa, err := clientSet.CoreV1().ServiceAccounts(test.Namespace).Get(context.TODO(), "s3-reader", metav1.GetOptions{})
 			Expect(err).ShouldNot(HaveOccurred())
 			Expect(sa.Annotations).To(HaveLen(1))
 			Expect(sa.Annotations).To(HaveKey(api.AnnotationEKSRoleARN))

From 394cbc1ab111801274784a635edd4f7ed6ad7fa6 Mon Sep 17 00:00:00 2001
From: Tibi <110664232+TiberiuGC@users.noreply.github.com>
Date: Wed, 25 Oct 2023 22:31:44 +0300
Subject: [PATCH 04/10] change cluster name

---
 integration/tests/crud/creategetdelete_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go
index 
c76a09bb22..ee3a7bcb0e 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -55,7 +55,7 @@ func init() { if err := api.Register(); err != nil { panic(errors.Wrap(err, "unexpected error registering API scheme")) } - params = tests.NewParamsWithGivenClusterName("crud", "test-crud") + params = tests.NewParamsWithGivenClusterName("crud", "test-cluster") } func TestCRUD(t *testing.T) { From a595aac88fdca48a774008619630b987d1e35984 Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Thu, 26 Oct 2023 10:55:52 +0300 Subject: [PATCH 05/10] remove duplicate cluster workloads test --- .../tests/managed/managed_nodegroup_test.go | 65 ------------------- 1 file changed, 65 deletions(-) diff --git a/integration/tests/managed/managed_nodegroup_test.go b/integration/tests/managed/managed_nodegroup_test.go index f5fee494c4..2f04c2bcde 100644 --- a/integration/tests/managed/managed_nodegroup_test.go +++ b/integration/tests/managed/managed_nodegroup_test.go @@ -15,8 +15,6 @@ import ( ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" awseks "github.com/aws/aws-sdk-go-v2/service/eks" - harness "github.com/dlespiau/kube-test-harness" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -80,8 +78,6 @@ var _ = Describe("(Integration) Create Managed Nodegroups", func() { ) var ( - defaultTimeout = 20 * time.Minute - makeClusterConfig = func() *api.ClusterConfig { clusterConfig := api.NewClusterConfig() clusterConfig.Metadata.Name = params.ClusterName @@ -144,67 +140,6 @@ var _ = Describe("(Integration) Create Managed Nodegroups", func() { }), ) - Context("create test workloads", func() { - var ( - err error - test *harness.Test - ) - - BeforeEach(func() { - test, err = kube.NewTest(params.KubeconfigPath) - Expect(err).ShouldNot(HaveOccurred()) - }) - - AfterEach(func() { - test.Close() - Eventually(func() int { - return len(test.ListPods(test.Namespace, metav1.ListOptions{}).Items) - }, "3m", "1s").Should(BeZero()) - }) - - It("should deploy podinfo service to the cluster and access it via proxy", func() { - d := test.CreateDeploymentFromFile(test.Namespace, "../../data/podinfo.yaml") - test.WaitForDeploymentReady(d, defaultTimeout) - - pods := test.ListPodsFromDeployment(d) - Expect(len(pods.Items)).To(Equal(2)) - - // For each pod of the Deployment, check we receive a sensible response to a - // GET request on /version. 
- for _, pod := range pods.Items { - Expect(pod.Namespace).To(Equal(test.Namespace)) - - req := test.PodProxyGet(&pod, "", "/version") - fmt.Fprintf(GinkgoWriter, "url = %#v", req.URL()) - - var js map[string]interface{} - test.PodProxyGetJSON(&pod, "", "/version", &js) - - Expect(js).To(HaveKeyWithValue("version", "1.5.1")) - } - }) - - It("should have functional DNS", func() { - d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-dns.yaml") - test.WaitForDaemonSetReady(d, defaultTimeout) - { - ds, err := test.GetDaemonSet(test.Namespace, d.Name) - Expect(err).ShouldNot(HaveOccurred()) - fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) - } - }) - - It("should have access to HTTP(S) sites", func() { - d := test.CreateDaemonSetFromFile(test.Namespace, "../../data/test-http.yaml") - test.WaitForDaemonSetReady(d, defaultTimeout) - { - ds, err := test.GetDaemonSet(test.Namespace, d.Name) - Expect(err).ShouldNot(HaveOccurred()) - fmt.Fprintf(GinkgoWriter, "ds.Status = %#v", ds.Status) - } - }) - }) - Context("adding new managed nodegroups", func() { params.LogStacksEventsOnFailure() From bd4e44440975d5e8447ea08fb662d6d42874fe8b Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Fri, 27 Oct 2023 11:31:59 +0300 Subject: [PATCH 06/10] refactor describe stacks test --- .../tests/crud/creategetdelete_test.go | 31 ++++++++++++++----- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index ee3a7bcb0e..6d9c60b206 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -55,7 +55,7 @@ func init() { if err := api.Register(); err != nil { panic(errors.Wrap(err, "unexpected error registering API scheme")) } - params = tests.NewParamsWithGivenClusterName("crud", "test-cluster") + params = tests.NewParamsWithGivenClusterName("crud", "test") } func TestCRUD(t *testing.T) { @@ -204,12 +204,29 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { var stacks []*cfntypes.Stack Expect(yaml.Unmarshal(session.Out.Contents(), &stacks)).To(Succeed()) Expect(stacks).To(HaveLen(6)) - //nodegroupStack := stacks[0] - //clusterStack := stacks[5] - //Expect(aws.ToString(clusterStack.StackName)).To(ContainSubstring(params.ClusterName)) - //Expect(aws.ToString(nodegroupStack.StackName)).To(ContainSubstring(params.ClusterName)) - //Expect(aws.ToString(clusterStack.Description)).To(Equal("EKS cluster (dedicated VPC: true, dedicated IAM: true) [created and managed by eksctl]")) - //Expect(aws.ToString(nodegroupStack.Description)).To(Equal("EKS Managed Nodes (SSH access: false) [created by eksctl]")) + + var ( + names, descriptions []string + ngPrefix = params.ClusterName + "-nodegroup-" + ) + for _, s := range stacks { + names = append(names, *s.StackName) + descriptions = append(descriptions, *s.Description) + } + + Expect(names).To(ContainElements( + ContainSubstring(params.ClusterName+"-cluster"), + ContainSubstring(ngPrefix+deleteNg), + ContainSubstring(ngPrefix+scaleSingleNg), + ContainSubstring(ngPrefix+scaleMultipleNg), + ContainSubstring(ngPrefix+scaleMultipleMng), + ContainSubstring(ngPrefix+drainMng), + )) + Expect(descriptions).To(ContainElements( + "EKS cluster (dedicated VPC: true, dedicated IAM: true) [created and managed by eksctl]", + "EKS Managed Nodes (SSH access: false) [created by eksctl]", + "EKS nodes (AMI family: AmazonLinux2, SSH access: false, private 
networking: false) [created and managed by eksctl]", + )) }) }) From e54348be10f105ff930516253049d4da2ac4021c Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Thu, 2 Nov 2023 09:12:52 +0200 Subject: [PATCH 07/10] [CRUD integration] Add wait flag to delete cluster command --- .../tests/crud/creategetdelete_test.go | 44 +++++++++++-------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index 6d9c60b206..aa1da20479 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -1056,30 +1056,38 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { "nodegroup", "--cluster", params.ClusterName, "--name", deleteNg, + "--wait", )).To(RunSuccessfully()) }) }) + + Context("fetching nodegroup(s)", Serial, func() { + It("should be able to get all expected nodegroups", func() { + Expect(params.EksctlGetCmd.WithArgs( + "nodegroup", + "-o", "json", + "--cluster", params.ClusterName, + )).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich( + ContainElement(taintsNg1), + ContainElement(taintsNg2), + ContainElement(scaleSingleNg), + ContainElement(scaleMultipleNg), + ContainElement(scaleMultipleMng), + ContainElement(GPUMng), + ContainElement(drainMng), + ContainElement(newSubnetCLIMng), + ContainElement(newSubnetConfigFileMng), + ))) + }) + }) }) var _ = SynchronizedAfterSuite(func() {}, func() { - Expect(params.EksctlGetCmd.WithArgs( - "nodegroup", - "-o", "json", - "--cluster", params.ClusterName, - )).To(RunSuccessfullyWithOutputString(BeNodeGroupsWithNamesWhich( - // HaveLen(9), - ContainElement(taintsNg1), - ContainElement(taintsNg2), - ContainElement(scaleSingleNg), - ContainElement(scaleMultipleNg), - ContainElement(scaleMultipleMng), - ContainElement(GPUMng), - ContainElement(drainMng), - ContainElement(newSubnetCLIMng), - ContainElement(newSubnetConfigFileMng), - ))) - - params.DeleteClusters() + Expect(params.EksctlDeleteCmd.WithArgs( + "cluster", params.ClusterName, + "--wait", + )).To(RunSuccessfully()) + gexec.KillAndWait() if params.KubeconfigTemp { os.Remove(params.KubeconfigPath) From 7fad9e4f2f632a84898f61d3c07fdcd43289fc8f Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Fri, 3 Nov 2023 13:57:45 +0200 Subject: [PATCH 08/10] delete additional subnet --- integration/tests/crud/creategetdelete_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index aa1da20479..fc611f8072 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -1083,6 +1083,13 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { }) var _ = SynchronizedAfterSuite(func() {}, func() { + // before deleting the cluster, first delete the additioanl subnet + ec2 := awsec2.NewFromConfig(NewConfig(params.Region)) + _, err := ec2.DeleteSubnet(context.Background(), &awsec2.DeleteSubnetInput{ + SubnetId: &extraSubnetID, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(params.EksctlDeleteCmd.WithArgs( "cluster", params.ClusterName, "--wait", From 6da4bae66d39585bb7bbd051e2bbc789520dd623 Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Fri, 3 Nov 2023 14:25:41 +0200 Subject: [PATCH 09/10] fix typo Co-authored-by: Chetan Patwal --- 
integration/tests/crud/creategetdelete_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index fc611f8072..880a3f5417 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -1083,7 +1083,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { }) var _ = SynchronizedAfterSuite(func() {}, func() { - // before deleting the cluster, first delete the additioanl subnet + // before deleting the cluster, first delete the additional subnet ec2 := awsec2.NewFromConfig(NewConfig(params.Region)) _, err := ec2.DeleteSubnet(context.Background(), &awsec2.DeleteSubnetInput{ SubnetId: &extraSubnetID, From 4fd4e2f5160e7d13cff990232acc4915de9cd897 Mon Sep 17 00:00:00 2001 From: Tibi <110664232+TiberiuGC@users.noreply.github.com> Date: Fri, 3 Nov 2023 14:46:17 +0200 Subject: [PATCH 10/10] small refactor to trigger workflows --- integration/tests/crud/creategetdelete_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/integration/tests/crud/creategetdelete_test.go b/integration/tests/crud/creategetdelete_test.go index 880a3f5417..d3d4706dde 100644 --- a/integration/tests/crud/creategetdelete_test.go +++ b/integration/tests/crud/creategetdelete_test.go @@ -1085,9 +1085,7 @@ var _ = Describe("(Integration) Create, Get, Scale & Delete", func() { var _ = SynchronizedAfterSuite(func() {}, func() { // before deleting the cluster, first delete the additional subnet ec2 := awsec2.NewFromConfig(NewConfig(params.Region)) - _, err := ec2.DeleteSubnet(context.Background(), &awsec2.DeleteSubnetInput{ - SubnetId: &extraSubnetID, - }) + _, err := ec2.DeleteSubnet(context.Background(), &awsec2.DeleteSubnetInput{SubnetId: &extraSubnetID}) Expect(err).NotTo(HaveOccurred()) Expect(params.EksctlDeleteCmd.WithArgs(