From e86ded86e65e138977da558987260d60ae491bdc Mon Sep 17 00:00:00 2001 From: Ilya Dmitrichenko Date: Wed, 7 Nov 2018 16:59:16 +0000 Subject: [PATCH] Clear separation between `api.ClusterConfig` and `api.ClusterProvider` This refactors how we separate provider configuration and its receivers from cluster configuration. We no longer copy cluster configuration into every place where we need it; instead, we pass it explicitly every time. --- pkg/ami/auto_resolver_test.go | 4 - pkg/az/az_test.go | 4 - pkg/cfn/builder/api_test.go | 31 ++++---- pkg/cfn/builder/cluster.go | 2 +- pkg/cfn/builder/nodegroup.go | 4 +- pkg/cfn/builder/vpc.go | 2 +- pkg/cfn/manager/api.go | 22 +++--- pkg/cfn/manager/cluster.go | 2 +- pkg/cfn/manager/deprecated.go | 8 +- pkg/cfn/manager/nodegroup.go | 4 +- pkg/cfn/manager/waiters.go | 4 +- pkg/ctl/create/cluster.go | 62 ++++++++------- pkg/ctl/delete/cluster.go | 37 ++++----- pkg/ctl/get/cluster.go | 32 ++++---- pkg/ctl/scale/nodegroup.go | 21 ++--- pkg/ctl/utils/describe_stacks.go | 25 +++--- pkg/ctl/utils/wait_nodes.go | 9 ++- pkg/ctl/utils/write_kubeconfig.go | 29 +++---- pkg/eks/api.go | 101 +++++++++++++++--------- pkg/eks/api/api.go | 48 +++++++---- pkg/eks/auth.go | 30 +++---- pkg/eks/eks.go | 62 +++++++-------- pkg/eks/eks_test.go | 25 ++---- pkg/eks/nodegroup.go | 4 +- pkg/eks/vpc.go | 24 +++--- pkg/nodebootstrap/userdata.go | 4 +- pkg/testutils/mock_provider.go | 19 +++++ pkg/utils/kubeconfig/kubeconfig.go | 20 ++--- pkg/utils/kubeconfig/kubeconfig_test.go | 16 ++-- 29 files changed, 353 insertions(+), 302 deletions(-) diff --git a/pkg/ami/auto_resolver_test.go b/pkg/ami/auto_resolver_test.go index 44e61b6e6a..f1cd184ae9 100644 --- a/pkg/ami/auto_resolver_test.go +++ b/pkg/ami/auto_resolver_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/mock" . "github.com/weaveworks/eksctl/pkg/ami" "github.com/weaveworks/eksctl/pkg/eks" - "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/testutils" ) @@ -170,9 +169,6 @@ func createProviders() (*eks.ClusterProvider, *testutils.MockProvider) { c := &eks.ClusterProvider{ Provider: p, - Spec: &api.ClusterConfig{ - Region: "eu-west-1", - }, } return c, p diff --git a/pkg/az/az_test.go b/pkg/az/az_test.go index 0c897fddb7..ac86600a75 100644 --- a/pkg/az/az_test.go +++ b/pkg/az/az_test.go @@ -5,7 +5,6 @@ import ( .
"github.com/weaveworks/eksctl/pkg/az" "github.com/weaveworks/eksctl/pkg/eks" - "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/testutils" "github.com/aws/aws-sdk-go/aws" @@ -255,9 +254,6 @@ func createProviders() (*eks.ClusterProvider, *testutils.MockProvider) { c := &eks.ClusterProvider{ Provider: p, - Spec: &api.ClusterConfig{ - Region: "us-west-1", - }, } return c, p diff --git a/pkg/cfn/builder/api_test.go b/pkg/cfn/builder/api_test.go index d1cd3e5f37..0fb2ac8824 100644 --- a/pkg/cfn/builder/api_test.go +++ b/pkg/cfn/builder/api_test.go @@ -16,6 +16,7 @@ import ( "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/nodebootstrap" + "github.com/weaveworks/eksctl/pkg/testutils" ) const ( @@ -105,8 +106,8 @@ var _ = Describe("CloudFormation template builder API", func() { cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() - cfg.Region = "us-west-2" - cfg.ClusterName = clusterName + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs ng.InstanceType = "t2.medium" ng.AMIFamily = "AmazonLinux2" @@ -121,8 +122,10 @@ var _ = Describe("CloudFormation template builder API", func() { It("should not error", func() { Expect(err).ShouldNot(HaveOccurred()) }) expected := &api.ClusterConfig{ - Region: "us-west-2", - ClusterName: clusterName, + Metadata: &api.ClusterMeta{ + Region: "us-west-2", + Name: clusterName, + }, Endpoint: endpoint, CertificateAuthorityData: caCertData, ARN: arn, @@ -197,10 +200,10 @@ var _ = Describe("CloudFormation template builder API", func() { } cfg := newClusterConfig() - ctl := eks.New(cfg) + ctl := eks.New(testutils.ProviderConfig, cfg) It("should not error when calling SetSubnets", func() { - err := ctl.SetSubnets() + err := ctl.SetSubnets(cfg) Expect(err).ShouldNot(HaveOccurred()) }) @@ -286,8 +289,8 @@ var _ = Describe("CloudFormation template builder API", func() { cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() - cfg.Region = "us-west-2" - cfg.ClusterName = clusterName + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs ng.InstanceType = "t2.medium" @@ -359,8 +362,8 @@ var _ = Describe("CloudFormation template builder API", func() { cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() - cfg.Region = "us-west-2" - cfg.ClusterName = clusterName + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs ng.AllowSSH = true ng.InstanceType = "t2.medium" @@ -410,8 +413,8 @@ var _ = Describe("CloudFormation template builder API", func() { cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() - cfg.Region = "us-west-2" - cfg.ClusterName = clusterName + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs ng.AllowSSH = true ng.InstanceType = "t2.medium" @@ -463,8 +466,8 @@ var _ = Describe("CloudFormation template builder API", func() { cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() - cfg.Region = "us-west-2" - cfg.ClusterName = clusterName + cfg.Metadata.Region = "us-west-2" + cfg.Metadata.Name = clusterName cfg.AvailabilityZones = testAZs cfg.VPC = &api.ClusterVPC{ Network: api.Network{ diff --git a/pkg/cfn/builder/cluster.go b/pkg/cfn/builder/cluster.go index f470690aa8..c3cd810cd6 100644 --- a/pkg/cfn/builder/cluster.go +++ b/pkg/cfn/builder/cluster.go @@ -79,7 +79,7 @@ func (c *ClusterResourceSet) addResourcesForControlPlane(version string) { } 
c.newResource("ControlPlane", &gfn.AWSEKSCluster{ - Name: gfn.NewString(c.spec.ClusterName), + Name: gfn.NewString(c.spec.Metadata.Name), RoleArn: gfn.MakeFnGetAttString("ServiceRole.Arn"), Version: gfn.NewString(version), ResourcesVpcConfig: clusterVPC, diff --git a/pkg/cfn/builder/nodegroup.go b/pkg/cfn/builder/nodegroup.go index aa1b31dc9d..2b49d509db 100644 --- a/pkg/cfn/builder/nodegroup.go +++ b/pkg/cfn/builder/nodegroup.go @@ -33,7 +33,7 @@ func NewNodeGroupResourceSet(spec *api.ClusterConfig, clusterStackName string, i rs: newResourceSet(), id: id, clusterStackName: clusterStackName, - nodeGroupName: fmt.Sprintf("%s-%d", spec.ClusterName, id), + nodeGroupName: fmt.Sprintf("%s-%d", spec.Metadata.Name, id), clusterSpec: spec, spec: spec.NodeGroups[id], } @@ -156,7 +156,7 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup() error { "VPCZoneIdentifier": vpcZoneIdentifier, "Tags": []map[string]interface{}{ {"Key": "Name", "Value": fmt.Sprintf("%s-Node", n.nodeGroupName), "PropagateAtLaunch": "true"}, - {"Key": "kubernetes.io/cluster/" + n.clusterSpec.ClusterName, "Value": "owned", "PropagateAtLaunch": "true"}, + {"Key": "kubernetes.io/cluster/" + n.clusterSpec.Metadata.Name, "Value": "owned", "PropagateAtLaunch": "true"}, }, }, UpdatePolicy: map[string]map[string]string{ diff --git a/pkg/cfn/builder/vpc.go b/pkg/cfn/builder/vpc.go index 2f98b4e272..b6558da59f 100644 --- a/pkg/cfn/builder/vpc.go +++ b/pkg/cfn/builder/vpc.go @@ -129,7 +129,7 @@ func (n *NodeGroupResourceSet) addResourcesForSecurityGroups() { VpcId: makeImportValue(n.clusterStackName, cfnOutputClusterVPC), GroupDescription: gfn.NewString("Communication between the control plane and " + desc), Tags: []gfn.Tag{{ - Key: gfn.NewString("kubernetes.io/cluster/" + n.clusterSpec.ClusterName), + Key: gfn.NewString("kubernetes.io/cluster/" + n.clusterSpec.Metadata.Name), Value: gfn.NewString("owned"), }}, }) diff --git a/pkg/cfn/manager/api.go b/pkg/cfn/manager/api.go index 4197bc5584..f7011ff130 100644 --- a/pkg/cfn/manager/api.go +++ b/pkg/cfn/manager/api.go @@ -36,9 +36,10 @@ type ChangeSet = cloudformation.DescribeChangeSetOutput // StackCollection stores the CloudFormation stack information type StackCollection struct { - cfn cloudformationiface.CloudFormationAPI - spec *api.ClusterConfig - tags []*cloudformation.Tag + cfn cloudformationiface.CloudFormationAPI + waitTimeout time.Duration + spec *api.ClusterConfig + tags []*cloudformation.Tag } func newTag(key, value string) *cloudformation.Tag { @@ -48,16 +49,17 @@ func newTag(key, value string) *cloudformation.Tag { // NewStackCollection create a stack manager for a single cluster func NewStackCollection(provider api.ClusterProvider, spec *api.ClusterConfig) *StackCollection { tags := []*cloudformation.Tag{ - newTag(ClusterNameTag, spec.ClusterName), + newTag(ClusterNameTag, spec.Metadata.Name), } - for key, value := range spec.Tags { + for key, value := range spec.Metadata.Tags { tags = append(tags, newTag(key, value)) } logger.Debug("tags = %#v", tags) return &StackCollection{ - cfn: provider.CloudFormation(), - spec: spec, - tags: tags, + cfn: provider.CloudFormation(), + waitTimeout: provider.WaitTimeout(), + spec: spec, + tags: tags, } } @@ -203,7 +205,7 @@ func (c *StackCollection) DeleteStack(name string) (*Stack, error) { } i.StackId = s.StackId for _, tag := range s.Tags { - if *tag.Key == ClusterNameTag && *tag.Value == c.spec.ClusterName { + if *tag.Key == ClusterNameTag && *tag.Value == c.spec.Metadata.Name { input := &cloudformation.DeleteStackInput{ 
StackName: i.StackId, } @@ -217,7 +219,7 @@ func (c *StackCollection) DeleteStack(name string) (*Stack, error) { } return nil, fmt.Errorf("cannot delete stack %q as it doesn't bare our %q tag", *s.StackName, - fmt.Sprintf("%s:%s", ClusterNameTag, c.spec.ClusterName)) + fmt.Sprintf("%s:%s", ClusterNameTag, c.spec.Metadata.Name)) } // WaitDeleteStack kills a stack by name and waits for DELETED status diff --git a/pkg/cfn/manager/cluster.go b/pkg/cfn/manager/cluster.go index f00eecd5a1..2b0a68857a 100644 --- a/pkg/cfn/manager/cluster.go +++ b/pkg/cfn/manager/cluster.go @@ -7,7 +7,7 @@ import ( ) func (c *StackCollection) makeClusterStackName() string { - return "eksctl-" + c.spec.ClusterName + "-cluster" + return "eksctl-" + c.spec.Metadata.Name + "-cluster" } // CreateCluster creates the cluster diff --git a/pkg/cfn/manager/deprecated.go b/pkg/cfn/manager/deprecated.go index 2b1a239d58..923f0dd231 100644 --- a/pkg/cfn/manager/deprecated.go +++ b/pkg/cfn/manager/deprecated.go @@ -3,7 +3,7 @@ package manager // DeprecatedDeleteStackVPC deletes the VPC stack func (c *StackCollection) DeprecatedDeleteStackVPC(wait bool) error { var err error - stackName := "EKS-" + c.spec.ClusterName + "-VPC" + stackName := "EKS-" + c.spec.Metadata.Name + "-VPC" if wait { err = c.WaitDeleteStack(stackName) @@ -17,7 +17,7 @@ func (c *StackCollection) DeprecatedDeleteStackVPC(wait bool) error { // DeprecatedDeleteStackServiceRole deletes the service role stack func (c *StackCollection) DeprecatedDeleteStackServiceRole(wait bool) error { var err error - stackName := "EKS-" + c.spec.ClusterName + "-ServiceRole" + stackName := "EKS-" + c.spec.Metadata.Name + "-ServiceRole" if wait { err = c.WaitDeleteStack(stackName) @@ -31,7 +31,7 @@ func (c *StackCollection) DeprecatedDeleteStackServiceRole(wait bool) error { // DeprecatedDeleteStackDefaultNodeGroup deletes the default node group stack func (c *StackCollection) DeprecatedDeleteStackDefaultNodeGroup(wait bool) error { var err error - stackName := "EKS-" + c.spec.ClusterName + "-DefaultNodeGroup" + stackName := "EKS-" + c.spec.Metadata.Name + "-DefaultNodeGroup" if wait { err = c.WaitDeleteStack(stackName) @@ -45,7 +45,7 @@ func (c *StackCollection) DeprecatedDeleteStackDefaultNodeGroup(wait bool) error // DeprecatedDeleteStackControlPlane deletes the control plane stack func (c *StackCollection) DeprecatedDeleteStackControlPlane(wait bool) error { var err error - stackName := "EKS-" + c.spec.ClusterName + "-ControlPlane" + stackName := "EKS-" + c.spec.Metadata.Name + "-ControlPlane" if wait { err = c.WaitDeleteStack(stackName) diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go index 0cd9419493..78c5f23b0a 100644 --- a/pkg/cfn/manager/nodegroup.go +++ b/pkg/cfn/manager/nodegroup.go @@ -22,7 +22,7 @@ const ( ) func (c *StackCollection) makeNodeGroupStackName(id int) string { - return fmt.Sprintf("eksctl-%s-nodegroup-%d", c.spec.ClusterName, id) + return fmt.Sprintf("eksctl-%s-nodegroup-%d", c.spec.Metadata.Name, id) } // CreateNodeGroup creates the nodegroup @@ -45,7 +45,7 @@ func (c *StackCollection) CreateNodeGroup(errs chan error, data interface{}) err } func (c *StackCollection) listAllNodeGroups() ([]string, error) { - stacks, err := c.ListStacks(fmt.Sprintf("^eksctl-%s-nodegroup-\\d$", c.spec.ClusterName)) + stacks, err := c.ListStacks(fmt.Sprintf("^eksctl-%s-nodegroup-\\d$", c.spec.Metadata.Name)) if err != nil { return nil, err } diff --git a/pkg/cfn/manager/waiters.go b/pkg/cfn/manager/waiters.go index a07ac295a9..afaa71e0a5 100644 --- 
a/pkg/cfn/manager/waiters.go +++ b/pkg/cfn/manager/waiters.go @@ -73,7 +73,7 @@ func (c *StackCollection) waitWithAcceptors(i *Stack, acceptors []request.Waiter desiredStatus := fmt.Sprintf("%v", acceptors[0].Expected) msg := fmt.Sprintf("waiting for CloudFormation stack %q to reach %q status", *i.StackName, desiredStatus) - ctx, cancel := context.WithTimeout(context.Background(), c.spec.WaitTimeout) + ctx, cancel := context.WithTimeout(context.Background(), c.waitTimeout) defer cancel() startTime := time.Now() @@ -118,7 +118,7 @@ func (c *StackCollection) waitWithAcceptors(i *Stack, acceptors []request.Waiter func (c *StackCollection) waitWithAcceptorsChangeSet(i *Stack, changesetName *string, acceptors []request.WaiterAcceptor) error { desiredStatus := fmt.Sprintf("%v", acceptors[0].Expected) msg := fmt.Sprintf("waiting for CloudFormation changeset %q for stack %q to reach %q status", *changesetName, *i.StackName, desiredStatus) - ctx, cancel := context.WithTimeout(context.Background(), c.spec.WaitTimeout) + ctx, cancel := context.WithTimeout(context.Background(), c.waitTimeout) defer cancel() startTime := time.Now() w := request.Waiter{ diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go index e331282de5..0a5f9c068d 100644 --- a/pkg/ctl/create/cluster.go +++ b/pkg/ctl/create/cluster.go @@ -34,6 +34,7 @@ var ( ) func createClusterCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() @@ -41,7 +42,7 @@ func createClusterCmd() *cobra.Command { Use: "cluster", Short: "Create a cluster", Run: func(_ *cobra.Command, args []string) { - if err := doCreateCluster(cfg, ng, ctl.GetNameArg(args)); err != nil { + if err := doCreateCluster(p, cfg, ng, ctl.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -52,11 +53,11 @@ func createClusterCmd() *cobra.Command { exampleClusterName := utils.ClusterName("", "") - fs.StringVarP(&cfg.ClusterName, "name", "n", "", fmt.Sprintf("EKS cluster name (generated if unspecified, e.g. %q)", exampleClusterName)) + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", fmt.Sprintf("EKS cluster name (generated if unspecified, e.g. %q)", exampleClusterName)) - fs.StringVarP(&cfg.Region, "region", "r", "", "AWS region") - fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") - fs.StringToStringVarP(&cfg.Tags, "tags", "", map[string]string{}, `A list of KV pairs used to tag the AWS resources (e.g. "Owner=John Doe,Team=Some Team")`) + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") + fs.StringToStringVarP(&cfg.Metadata.Tags, "tags", "", map[string]string{}, `A list of KV pairs used to tag the AWS resources (e.g. 
"Owner=John Doe,Team=Some Team")`) fs.StringVarP(&ng.InstanceType, "node-type", "t", defaultNodeType, "node instance type") fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", api.DefaultNodeCount, "total number of nodes (desired capacity of AGS)") @@ -77,12 +78,12 @@ func createClusterCmd() *cobra.Command { fs.StringVar(&kubeconfigPath, "kubeconfig", kubeconfig.DefaultPath, "path to write kubeconfig (incompatible with --auto-kubeconfig)") fs.BoolVar(&setContext, "set-kubeconfig-context", true, "if true then current-context will be set in kubeconfig; if a context is already set then it will be overwritten") - fs.DurationVar(&cfg.WaitTimeout, "aws-api-timeout", api.DefaultWaitTimeout, "") + fs.DurationVar(&p.WaitTimeout, "aws-api-timeout", api.DefaultWaitTimeout, "") // TODO deprecate in 0.2.0 if err := fs.MarkHidden("aws-api-timeout"); err != nil { logger.Debug("ignoring error %q", err.Error()) } - fs.DurationVar(&cfg.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") fs.BoolVar(&cfg.Addons.WithIAM.PolicyAmazonEC2ContainerRegistryPowerUser, "full-ecr-access", false, "enable full access to ECR") fs.BoolVar(&cfg.Addons.WithIAM.PolicyAutoScaling, "asg-access", false, "enable iam policy dependency for cluster-autoscaler") @@ -105,28 +106,29 @@ func createClusterCmd() *cobra.Command { return cmd } -func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) error { - ctl := eks.New(cfg) +func doCreateCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup, nameArg string) error { + cl := cfg.Metadata + ctl := eks.New(p, cfg) - if !cfg.IsSupportedRegion() { - return fmt.Errorf("--region=%s is not supported - use one of: %s", cfg.Region, strings.Join(api.SupportedRegions(), ", ")) + if !ctl.IsSupportedRegion() { + return fmt.Errorf("--region=%s is not supported - use one of: %s", cl.Region, strings.Join(api.SupportedRegions(), ", ")) } - logger.Info("using region %s", cfg.Region) + logger.Info("using region %s", cl.Region) if err := ctl.CheckAuth(); err != nil { return err } - if utils.ClusterName(cfg.ClusterName, name) == "" { - return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.ClusterName, name) + if utils.ClusterName(cl.Name, nameArg) == "" { + return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cl.Name, nameArg) } - cfg.ClusterName = utils.ClusterName(cfg.ClusterName, name) + cl.Name = utils.ClusterName(cl.Name, nameArg) if autoKubeconfigPath { if kubeconfigPath != kubeconfig.DefaultPath { return fmt.Errorf("--kubeconfig and --auto-kubeconfig cannot be used at the same time") } - kubeconfigPath = kubeconfig.AutoPath(cfg.ClusterName) + kubeconfigPath = kubeconfig.AutoPath(cl.Name) } if ng.SSHPublicKeyPath == "" { @@ -152,10 +154,10 @@ func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) err if !subnetsGiven && kopsClusterNameForVPC == "" { // default: create dedicated VPC - if err := ctl.SetAvailabilityZones(availabilityZones); err != nil { + if err := ctl.SetAvailabilityZones(cfg, availabilityZones); err != nil { return err } - if err := ctl.SetSubnets(); err != nil { + if err := ctl.SetSubnets(cfg); err != nil { return err } return nil @@ -171,7 +173,7 @@ func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) err return fmt.Errorf("--vpc-from-kops-cluster and --vpc-private-subnets/--vpc-public-subnets cannot be 
used at the same time") } - kw, err := kops.NewWrapper(cfg.Region, kopsClusterNameForVPC) + kw, err := kops.NewWrapper(p.Region, kopsClusterNameForVPC) if err != nil { return err } @@ -196,7 +198,7 @@ func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) err } for topology := range subnets { - if err := ctl.UseSubnets(topology, *subnets[topology]); err != nil { + if err := ctl.UseSubnets(cfg, topology, *subnets[topology]); err != nil { return err } } @@ -223,36 +225,36 @@ func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) err return err } - if err := ctl.LoadSSHPublicKey(ng); err != nil { + if err := ctl.LoadSSHPublicKey(cl.Name, ng); err != nil { return err } logger.Debug("cfg = %#v", cfg) - logger.Info("creating EKS cluster %q in %q region", cfg.ClusterName, cfg.Region) + logger.Info("creating %s", cl.LogString()) { // core action - stackManager := ctl.NewStackManager() + stackManager := ctl.NewStackManager(cfg) logger.Info("will create 2 separate CloudFormation stacks for cluster itself and the initial nodegroup") - logger.Info("if you encounter any issues, check CloudFormation console or try 'eksctl utils describe-stacks --region=%s --name=%s'", cfg.Region, cfg.ClusterName) + logger.Info("if you encounter any issues, check CloudFormation console or try 'eksctl utils describe-stacks --region=%s --name=%s'", cl.Region, cl.Name) errs := stackManager.CreateClusterWithNodeGroups() // read any errors (it only gets non-nil errors) if len(errs) > 0 { logger.Info("%d error(s) occurred and cluster hasn't been created properly, you may wish to check CloudFormation console", len(errs)) - logger.Info("to cleanup resources, run 'eksctl delete cluster --region=%s --name=%s'", cfg.Region, cfg.ClusterName) + logger.Info("to cleanup resources, run 'eksctl delete cluster --region=%s --name=%s'", cl.Region, cl.Name) for _, err := range errs { logger.Critical("%s\n", err.Error()) } - return fmt.Errorf("failed to create cluster %q", cfg.ClusterName) + return fmt.Errorf("failed to create cluster %q", cl.Name) } } - logger.Success("all EKS cluster resource for %q had been created", cfg.ClusterName) + logger.Success("all EKS cluster resource for %q had been created", cl.Name) // obtain cluster credentials, write kubeconfig { // post-creation action - clientConfigBase, err := ctl.NewClientConfig() + clientConfigBase, err := ctl.NewClientConfig(cfg) if err != nil { return err } @@ -275,7 +277,7 @@ func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) err return err } - if err = ctl.WaitForControlPlane(clientSet); err != nil { + if err = ctl.WaitForControlPlane(cl, clientSet); err != nil { return err } @@ -315,7 +317,7 @@ func doCreateCluster(cfg *api.ClusterConfig, ng *api.NodeGroup, name string) err } } - logger.Success("EKS cluster %q in %q region is ready", cfg.ClusterName, cfg.Region) + logger.Success("%s is ready", cl.LogString()) return nil } diff --git a/pkg/ctl/delete/cluster.go b/pkg/ctl/delete/cluster.go index 7a1f4077a8..c9d63652fe 100644 --- a/pkg/ctl/delete/cluster.go +++ b/pkg/ctl/delete/cluster.go @@ -14,13 +14,14 @@ import ( ) func deleteClusterCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() cmd := &cobra.Command{ Use: "cluster", Short: "Delete a cluster", Run: func(_ *cobra.Command, args []string) { - if err := doDeleteCluster(cfg, ctl.GetNameArg(args)); err != nil { + if err := doDeleteCluster(p, cfg, ctl.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ 
-29,38 +30,38 @@ func deleteClusterCmd() *cobra.Command { fs := cmd.Flags() - fs.StringVarP(&cfg.ClusterName, "name", "n", "", "EKS cluster name (required)") + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name (required)") - fs.StringVarP(&cfg.Region, "region", "r", "", "AWS region") - fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") fs.BoolVarP(&waitDelete, "wait", "w", false, "Wait for deletion of all resources before exiting") - fs.DurationVar(&cfg.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") return cmd } -func doDeleteCluster(cfg *api.ClusterConfig, name string) error { - ctl := eks.New(cfg) +func doDeleteCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { + ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { return err } - if cfg.ClusterName != "" && name != "" { - return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.ClusterName, name) + if cfg.Metadata.Name != "" && name != "" { + return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.Metadata.Name, name) } if name != "" { - cfg.ClusterName = name + cfg.Metadata.Name = name } - if cfg.ClusterName == "" { + if cfg.Metadata.Name == "" { return fmt.Errorf("--name must be set") } - logger.Info("deleting EKS cluster %q", cfg.ClusterName) + logger.Info("deleting EKS cluster %q", cfg.Metadata.Name) var deletedResources []string @@ -76,7 +77,7 @@ func doDeleteCluster(cfg *api.ClusterConfig, name string) error { // We can remove all 'DeprecatedDelete*' calls in 0.2.0 - stackManager := ctl.NewStackManager() + stackManager := ctl.NewStackManager(cfg) { errs := stackManager.WaitDeleteAllNodeGroups() @@ -98,7 +99,7 @@ func doDeleteCluster(cfg *api.ClusterConfig, name string) error { } if clusterErr { - if handleIfError(ctl.DeprecatedDeleteControlPlane(), "control plane") { + if handleIfError(ctl.DeprecatedDeleteControlPlane(cfg.Metadata), "control plane") { handleIfError(stackManager.DeprecatedDeleteStackControlPlane(waitDelete), "stack control plane (deprecated)") } } @@ -107,14 +108,14 @@ func doDeleteCluster(cfg *api.ClusterConfig, name string) error { handleIfError(stackManager.DeprecatedDeleteStackVPC(waitDelete), "stack VPC (deprecated)") handleIfError(stackManager.DeprecatedDeleteStackDefaultNodeGroup(waitDelete), "default nodegroup (deprecated)") - ctl.MaybeDeletePublicSSHKey() + ctl.MaybeDeletePublicSSHKey(cfg.Metadata.Name) - kubeconfig.MaybeDeleteConfig(cfg) + kubeconfig.MaybeDeleteConfig(cfg.Metadata) if len(deletedResources) == 0 { - logger.Warning("no EKS cluster resources were found for %q", ctl.Spec.ClusterName) + logger.Warning("no EKS cluster resources were found for %q", cfg.Metadata.Name) } else { - logger.Success("the following EKS cluster resource(s) for %q will be deleted: %s. If in doubt, check CloudFormation console", ctl.Spec.ClusterName, strings.Join(deletedResources, ", ")) + logger.Success("the following EKS cluster resource(s) for %q will be deleted: %s. 
If in doubt, check CloudFormation console", cfg.Metadata.Name, strings.Join(deletedResources, ", ")) } return nil diff --git a/pkg/ctl/get/cluster.go b/pkg/ctl/get/cluster.go index bc7ba86591..2d1d2aad01 100644 --- a/pkg/ctl/get/cluster.go +++ b/pkg/ctl/get/cluster.go @@ -15,6 +15,7 @@ import ( var listAllRegions bool func getClusterCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() cmd := &cobra.Command{ @@ -22,7 +23,7 @@ func getClusterCmd() *cobra.Command { Short: "Get cluster(s)", Aliases: []string{"clusters"}, Run: func(_ *cobra.Command, args []string) { - if err := doGetCluster(cfg, ctl.GetNameArg(args)); err != nil { + if err := doGetCluster(p, cfg, ctl.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -31,38 +32,39 @@ func getClusterCmd() *cobra.Command { fs := cmd.Flags() - fs.StringVarP(&cfg.ClusterName, "name", "n", "", "EKS cluster name") + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name") fs.BoolVarP(&listAllRegions, "all-regions", "A", false, "List clusters across all supported regions") fs.IntVar(&chunkSize, "chunk-size", defaultChunkSize, "Return large lists in chunks rather than all at once. Pass 0 to disable.") - fs.StringVarP(&cfg.Region, "region", "r", "", "AWS region") - fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") fs.StringVarP(&output, "output", "o", "table", "Specifies the output format. Choose from table,json,yaml. Defaults to table.") + return cmd } -func doGetCluster(cfg *api.ClusterConfig, name string) error { - regionGiven := cfg.Region != "" // eks.New resets this field, so we need to check if it was set in the fist place - ctl := eks.New(cfg) +func doGetCluster(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { + regionGiven := cfg.Metadata.Region != "" // eks.New resets this field, so we need to check if it was set in the fist place + ctl := eks.New(p, cfg) - if !cfg.IsSupportedRegion() { - return fmt.Errorf("--region=%s is not supported - use one of: %s", cfg.Region, strings.Join(api.SupportedRegions(), ", ")) + if !ctl.IsSupportedRegion() { + return fmt.Errorf("--region=%s is not supported - use one of: %s", cfg.Metadata.Region, strings.Join(api.SupportedRegions(), ", ")) } if regionGiven && listAllRegions { - logger.Warning("--region=%s is ignored, as --all-regions is given", cfg.Region) + logger.Warning("--region=%s is ignored, as --all-regions is given", cfg.Metadata.Region) } - if cfg.ClusterName != "" && name != "" { - return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.ClusterName, name) + if cfg.Metadata.Name != "" && name != "" { + return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.Metadata.Name, name) } if name != "" { - cfg.ClusterName = name + cfg.Metadata.Name = name } - if cfg.ClusterName != "" && listAllRegions { + if cfg.Metadata.Name != "" && listAllRegions { return fmt.Errorf("--all-regions is for listing all clusters, it must be used without cluster name flag/argument") } @@ -70,5 +72,5 @@ func doGetCluster(cfg *api.ClusterConfig, name string) error { return err } - return ctl.ListClusters(chunkSize, output, listAllRegions) + return ctl.ListClusters(cfg.Metadata.Name, chunkSize, output, listAllRegions) 
} diff --git a/pkg/ctl/scale/nodegroup.go b/pkg/ctl/scale/nodegroup.go index f217b5ca3b..0daf529f7e 100644 --- a/pkg/ctl/scale/nodegroup.go +++ b/pkg/ctl/scale/nodegroup.go @@ -11,6 +11,7 @@ import ( ) func scaleNodeGroupCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() @@ -18,7 +19,7 @@ func scaleNodeGroupCmd() *cobra.Command { Use: "nodegroup", Short: "Scale a nodegroup", Run: func(_ *cobra.Command, args []string) { - if err := doScaleNodeGroup(cfg, ng); err != nil { + if err := doScaleNodeGroup(p, cfg, ng); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -27,26 +28,26 @@ func scaleNodeGroupCmd() *cobra.Command { fs := cmd.Flags() - fs.StringVarP(&cfg.ClusterName, "name", "n", "", "EKS cluster name") + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name") fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") - fs.StringVarP(&cfg.Region, "region", "r", "", "AWS region") - fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS creditials profile to use (overrides the AWS_PROFILE environment variable)") + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS creditials profile to use (overrides the AWS_PROFILE environment variable)") - fs.DurationVar(&cfg.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") + fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "max wait time in any polling operations") return cmd } -func doScaleNodeGroup(cfg *api.ClusterConfig, ng *api.NodeGroup) error { - ctl := eks.New(cfg) +func doScaleNodeGroup(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) error { + ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { return err } - if cfg.ClusterName == "" { + if cfg.Metadata.Name == "" { return fmt.Errorf("no cluster name supplied. Use the --name= flag") } @@ -54,10 +55,10 @@ func doScaleNodeGroup(cfg *api.ClusterConfig, ng *api.NodeGroup) error { return fmt.Errorf("number of nodes must be 0 or greater. 
Use the --nodes/-N flag") } - stackManager := ctl.NewStackManager() + stackManager := ctl.NewStackManager(cfg) err := stackManager.ScaleInitialNodeGroup() if err != nil { - return fmt.Errorf("failed to scale nodegroup for cluster %q, error %v", cfg.ClusterName, err) + return fmt.Errorf("failed to scale nodegroup for cluster %q, error %v", cfg.Metadata.Name, err) } return nil diff --git a/pkg/ctl/utils/describe_stacks.go b/pkg/ctl/utils/describe_stacks.go index e738daaa3d..5f1b0d0d31 100644 --- a/pkg/ctl/utils/describe_stacks.go +++ b/pkg/ctl/utils/describe_stacks.go @@ -18,13 +18,14 @@ var ( ) func describeStacksCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() cmd := &cobra.Command{ Use: "describe-stacks", Short: "Describe CloudFormation stack for a given cluster", Run: func(_ *cobra.Command, args []string) { - if err := doDescribeStacksCmd(cfg, ctl.GetNameArg(args)); err != nil { + if err := doDescribeStacksCmd(p, cfg, ctl.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -33,10 +34,10 @@ func describeStacksCmd() *cobra.Command { fs := cmd.Flags() - fs.StringVarP(&cfg.ClusterName, "name", "n", "", "EKS cluster name (required)") + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name (required)") - fs.StringVarP(&cfg.Region, "region", "r", "", "AWS region") - fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") fs.BoolVar(&utilsDescribeStackAll, "all", false, "include deleted stacks") fs.BoolVar(&utilsDescribeStackEvents, "events", false, "include stack events") @@ -44,28 +45,28 @@ func describeStacksCmd() *cobra.Command { return cmd } -func doDescribeStacksCmd(cfg *api.ClusterConfig, name string) error { - ctl := eks.New(cfg) +func doDescribeStacksCmd(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { + ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { return err } - if cfg.ClusterName != "" && name != "" { - return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.ClusterName, name) + if cfg.Metadata.Name != "" && name != "" { + return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.Metadata.Name, name) } if name != "" { - cfg.ClusterName = name + cfg.Metadata.Name = name } - if cfg.ClusterName == "" { + if cfg.Metadata.Name == "" { return fmt.Errorf("--name must be set") } - stackManager := ctl.NewStackManager() + stackManager := ctl.NewStackManager(cfg) - stacks, err := stackManager.DescribeStacks(cfg.ClusterName) + stacks, err := stackManager.DescribeStacks(cfg.Metadata.Name) if err != nil { return err } diff --git a/pkg/ctl/utils/wait_nodes.go b/pkg/ctl/utils/wait_nodes.go index 86e42f1d93..f31ec0ae16 100644 --- a/pkg/ctl/utils/wait_nodes.go +++ b/pkg/ctl/utils/wait_nodes.go @@ -13,6 +13,7 @@ import ( ) func waitNodesCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() ng := cfg.NewNodeGroup() @@ -20,7 +21,7 @@ func waitNodesCmd() *cobra.Command { Use: "wait-nodes", Short: "Wait for nodes", Run: func(_ *cobra.Command, _ []string) { - if err := doWaitNodes(cfg, ng); err != nil { + if err := doWaitNodes(p, cfg, ng); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -31,13 +32,13 @@ func waitNodesCmd() 
*cobra.Command { fs.StringVar(&utilsKubeconfigInputPath, "kubeconfig", "kubeconfig", "path to read kubeconfig") fs.IntVarP(&ng.MinSize, "nodes-min", "m", api.DefaultNodeCount, "minimum number of nodes to wait for") - fs.DurationVar(&cfg.WaitTimeout, "timeout", api.DefaultWaitTimeout, "how long to wait") + fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "how long to wait") return cmd } -func doWaitNodes(cfg *api.ClusterConfig, ng *api.NodeGroup) error { - ctl := eks.New(cfg) +func doWaitNodes(p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) error { + ctl := eks.New(p, cfg) if utilsKubeconfigInputPath == "" { return fmt.Errorf("--kubeconfig must be set") diff --git a/pkg/ctl/utils/write_kubeconfig.go b/pkg/ctl/utils/write_kubeconfig.go index f2a47540fc..00c760b13d 100644 --- a/pkg/ctl/utils/write_kubeconfig.go +++ b/pkg/ctl/utils/write_kubeconfig.go @@ -21,13 +21,14 @@ var ( ) func writeKubeconfigCmd() *cobra.Command { + p := &api.ProviderConfig{} cfg := api.NewClusterConfig() cmd := &cobra.Command{ Use: "write-kubeconfig", Short: "Write kubeconfig file for a given cluster", Run: func(_ *cobra.Command, args []string) { - if err := doWriteKubeconfigCmd(cfg, ctl.GetNameArg(args)); err != nil { + if err := doWriteKubeconfigCmd(p, cfg, ctl.GetNameArg(args)); err != nil { logger.Critical("%s\n", err.Error()) os.Exit(1) } @@ -36,10 +37,10 @@ func writeKubeconfigCmd() *cobra.Command { fs := cmd.Flags() - fs.StringVarP(&cfg.ClusterName, "name", "n", "", "EKS cluster name (required)") + fs.StringVarP(&cfg.Metadata.Name, "name", "n", "", "EKS cluster name (required)") - fs.StringVarP(&cfg.Region, "region", "r", "", "AWS region") - fs.StringVarP(&cfg.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") + fs.StringVarP(&p.Region, "region", "r", "", "AWS region") + fs.StringVarP(&p.Profile, "profile", "p", "", "AWS credentials profile to use (overrides the AWS_PROFILE environment variable)") fs.BoolVar(&utilsAutoKubeconfigPath, "auto-kubeconfig", false, fmt.Sprintf("save kubeconfig file by cluster name – %q", kubeconfig.AutoPath(""))) fs.StringVar(&utilsKubeconfigOutputPath, "kubeconfig", kubeconfig.DefaultPath, "path to write kubeconfig") @@ -48,22 +49,22 @@ func writeKubeconfigCmd() *cobra.Command { return cmd } -func doWriteKubeconfigCmd(cfg *api.ClusterConfig, name string) error { - ctl := eks.New(cfg) +func doWriteKubeconfigCmd(p *api.ProviderConfig, cfg *api.ClusterConfig, name string) error { + ctl := eks.New(p, cfg) if err := ctl.CheckAuth(); err != nil { return err } - if cfg.ClusterName != "" && name != "" { - return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.ClusterName, name) + if cfg.Metadata.Name != "" && name != "" { + return fmt.Errorf("--name=%s and argument %s cannot be used at the same time", cfg.Metadata.Name, name) } if name != "" { - cfg.ClusterName = name + cfg.Metadata.Name = name } - if cfg.ClusterName == "" { + if cfg.Metadata.Name == "" { return fmt.Errorf("--name must be set") } @@ -71,21 +72,21 @@ func doWriteKubeconfigCmd(cfg *api.ClusterConfig, name string) error { if utilsKubeconfigOutputPath != kubeconfig.DefaultPath { return fmt.Errorf("--kubeconfig and --auto-kubeconfig cannot be used at the same time") } - utilsKubeconfigOutputPath = kubeconfig.AutoPath(cfg.ClusterName) + utilsKubeconfigOutputPath = kubeconfig.AutoPath(cfg.Metadata.Name) } - cluster, err := ctl.DescribeControlPlane() + cluster, err := ctl.DescribeControlPlane(cfg.Metadata) if 
err != nil { return err } logger.Debug("cluster = %#v", cluster) - if err = ctl.GetCredentials(*cluster); err != nil { + if err = ctl.GetCredentials(*cluster, cfg); err != nil { return err } - clientConfigBase, err := ctl.NewClientConfig() + clientConfigBase, err := ctl.NewClientConfig(cfg) if err != nil { return err } diff --git a/pkg/eks/api.go b/pkg/eks/api.go index 81a27ff963..a63b7af419 100644 --- a/pkg/eks/api.go +++ b/pkg/eks/api.go @@ -32,7 +32,6 @@ import ( // ClusterProvider stores information about the cluster type ClusterProvider struct { // core fields used for config and AWS APIs - Spec *api.ClusterConfig Provider api.ClusterProvider // informative fields, i.e. used as outputs Status *ProviderStatus @@ -40,10 +39,11 @@ type ClusterProvider struct { // ProviderServices stores the used APIs type ProviderServices struct { - cfn cloudformationiface.CloudFormationAPI - eks eksiface.EKSAPI - ec2 ec2iface.EC2API - sts stsiface.STSAPI + spec *api.ProviderConfig + cfn cloudformationiface.CloudFormationAPI + eks eksiface.EKSAPI + ec2 ec2iface.EC2API + sts stsiface.STSAPI } // CloudFormation returns a representation of the CloudFormation API @@ -58,6 +58,15 @@ func (p ProviderServices) EC2() ec2iface.EC2API { return p.ec2 } // STS returns a representation of the STS API func (p ProviderServices) STS() stsiface.STSAPI { return p.sts } +// Region returns provider-level region setting +func (p ProviderServices) Region() string { return p.spec.Region } + +// Profile returns provider-level profile name +func (p ProviderServices) Profile() string { return p.spec.Profile } + +// WaitTimeout returns provider-level duration after which any wait operation has to timeout +func (p ProviderServices) WaitTimeout() time.Duration { return p.spec.WaitTimeout } + // ProviderStatus stores information about the used IAM role and the resulting session type ProviderStatus struct { iamRoleARN string @@ -65,46 +74,60 @@ type ProviderStatus struct { } // New creates a new setup of the used AWS APIs -func New(clusterConfig *api.ClusterConfig) *ClusterProvider { +func New(spec *api.ProviderConfig, clusterSpec *api.ClusterConfig) *ClusterProvider { + provider := &ProviderServices{ + spec: spec, + } + c := &ClusterProvider{ + Provider: provider, + } // Create a new session and save credentials for possible // later re-use if overriding sessions due to custom URL - s := newSession(clusterConfig, "", nil) + s := c.newSession(spec, "", nil) - provider := &ProviderServices{ - cfn: cloudformation.New(s), - eks: awseks.New(s), - ec2: ec2.New(s), - sts: sts.New(s), - } + provider.cfn = cloudformation.New(s) + provider.eks = awseks.New(s) + provider.ec2 = ec2.New(s) + provider.sts = sts.New(s) - status := &ProviderStatus{ + c.Status = &ProviderStatus{ sessionCreds: s.Config.Credentials, } // override sessions if any custom endpoints specified if endpoint, ok := os.LookupEnv("AWS_CLOUDFORMATION_ENDPOINT"); ok { logger.Debug("Setting CloudFormation endpoint to %s", endpoint) - provider.cfn = cloudformation.New(newSession(clusterConfig, endpoint, status.sessionCreds)) + provider.cfn = cloudformation.New(c.newSession(spec, endpoint, c.Status.sessionCreds)) } if endpoint, ok := os.LookupEnv("AWS_EKS_ENDPOINT"); ok { logger.Debug("Setting EKS endpoint to %s", endpoint) - provider.eks = awseks.New(newSession(clusterConfig, endpoint, status.sessionCreds)) + provider.eks = awseks.New(c.newSession(spec, endpoint, c.Status.sessionCreds)) } if endpoint, ok := os.LookupEnv("AWS_EC2_ENDPOINT"); ok { logger.Debug("Setting EC2 endpoint to 
%s", endpoint) - provider.ec2 = ec2.New(newSession(clusterConfig, endpoint, status.sessionCreds)) + provider.ec2 = ec2.New(c.newSession(spec, endpoint, c.Status.sessionCreds)) } if endpoint, ok := os.LookupEnv("AWS_STS_ENDPOINT"); ok { logger.Debug("Setting STS endpoint to %s", endpoint) - provider.sts = sts.New(newSession(clusterConfig, endpoint, status.sessionCreds)) + provider.sts = sts.New(c.newSession(spec, endpoint, c.Status.sessionCreds)) } - return &ClusterProvider{ - Spec: clusterConfig, - Provider: provider, - Status: status, + if clusterSpec != nil { + clusterSpec.Metadata.Region = c.Provider.Region() + } + + return c +} + +// IsSupportedRegion check if given region is supported +func (c *ClusterProvider) IsSupportedRegion() bool { + for _, supportedRegion := range api.SupportedRegions() { + if c.Provider.Region() == supportedRegion { + return true + } } + return false } // GetCredentialsEnv returns the AWS credentials for env usage @@ -148,12 +171,12 @@ func (c *ClusterProvider) EnsureAMI(ng *api.NodeGroup) error { ami.DefaultResolvers = []ami.Resolver{ami.NewAutoResolver(c.Provider.EC2())} } if ng.AMI == ami.ResolverStatic || ng.AMI == ami.ResolverAuto { - id, err := ami.Resolve(c.Spec.Region, ng.InstanceType, ng.AMIFamily) + id, err := ami.Resolve(c.Provider.Region(), ng.InstanceType, ng.AMIFamily) if err != nil { return errors.Wrap(err, "Unable to determine AMI to use") } if id == "" { - return ami.NewErrFailedResolution(c.Spec.Region, ng.InstanceType, ng.AMIFamily) + return ami.NewErrFailedResolution(c.Provider.Region(), ng.InstanceType, ng.AMIFamily) } ng.AMI = id } @@ -174,37 +197,37 @@ func (c *ClusterProvider) EnsureAMI(ng *api.NodeGroup) error { } // SetAvailabilityZones sets the given (or chooses) the availability zones -func (c *ClusterProvider) SetAvailabilityZones(given []string) error { +func (c *ClusterProvider) SetAvailabilityZones(spec *api.ClusterConfig, given []string) error { if len(given) == 0 { logger.Debug("determining availability zones") azSelector := az.NewSelectorWithDefaults(c.Provider.EC2()) - if c.Spec.Region == api.EKSRegionUSEast1 { + if c.Provider.Region() == api.EKSRegionUSEast1 { azSelector = az.NewSelectorWithMinRequired(c.Provider.EC2()) } - zones, err := azSelector.SelectZones(c.Spec.Region) + zones, err := azSelector.SelectZones(c.Provider.Region()) if err != nil { return errors.Wrap(err, "getting availability zones") } logger.Info("setting availability zones to %v", zones) - c.Spec.AvailabilityZones = zones + spec.AvailabilityZones = zones return nil } if len(given) < az.MinRequiredAvailabilityZones { return fmt.Errorf("only %d zones specified %v, %d are required (can be non-unque)", len(given), given, az.MinRequiredAvailabilityZones) } - c.Spec.AvailabilityZones = given + spec.AvailabilityZones = given return nil } -func newSession(clusterConfig *api.ClusterConfig, endpoint string, credentials *credentials.Credentials) *session.Session { +func (c *ClusterProvider) newSession(spec *api.ProviderConfig, endpoint string, credentials *credentials.Credentials) *session.Session { // we might want to use bits from kops, although right now it seems like too many thing we // don't want yet // https://github.com/kubernetes/kops/blob/master/upup/pkg/fi/cloudup/awsup/aws_cloud.go#L179 config := aws.NewConfig() - if clusterConfig.Region != "" { - config = config.WithRegion(clusterConfig.Region) + if c.Provider.Region() != "" { + config = config.WithRegion(c.Provider.Region()) } config = config.WithCredentialsChainVerboseErrors(true) @@ -223,7 +246,7 
@@ func newSession(clusterConfig *api.ClusterConfig, endpoint string, credentials * opts := session.Options{ Config: *config, SharedConfigState: session.SharedConfigEnable, - Profile: clusterConfig.Profile, + Profile: spec.Profile, AssumeRoleTokenProvider: stscreds.StdinTokenProvider, } @@ -239,15 +262,15 @@ func newSession(clusterConfig *api.ClusterConfig, endpoint string, credentials * s := session.Must(session.NewSessionWithOptions(opts)) - if clusterConfig.Region == "" { + if spec.Region == "" { if *s.Config.Region != "" { // set cluster config region, based on session config - clusterConfig.Region = *s.Config.Region + spec.Region = *s.Config.Region } else { // if session config doesn't have region set, make recursive call forcing default region logger.Debug("no region specified in flags or config, setting to %s", api.DefaultEKSRegion) - clusterConfig.Region = api.DefaultEKSRegion - return newSession(clusterConfig, endpoint, credentials) + spec.Region = api.DefaultEKSRegion + return c.newSession(spec, endpoint, credentials) } } @@ -255,6 +278,6 @@ func newSession(clusterConfig *api.ClusterConfig, endpoint string, credentials * } // NewStackManager returns a new stack manager -func (c *ClusterProvider) NewStackManager() *manager.StackCollection { - return manager.NewStackCollection(c.Provider, c.Spec) +func (c *ClusterProvider) NewStackManager(spec *api.ClusterConfig) *manager.StackCollection { + return manager.NewStackCollection(c.Provider, spec) } diff --git a/pkg/eks/api/api.go b/pkg/eks/api/api.go index 7deeb70130..2f59ba9e7f 100644 --- a/pkg/eks/api/api.go +++ b/pkg/eks/api/api.go @@ -1,6 +1,7 @@ package api import ( + "fmt" "time" "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface" @@ -41,22 +42,44 @@ var DefaultWaitTimeout = 20 * time.Minute // DefaultNodeCount defines the default number of nodes to be created const DefaultNodeCount = 2 -// ClusterProvider provides an interface with the needed AWS APIs +// ClusterMeta is what identifies a cluster +type ClusterMeta struct { + Name string + Region string + Tags map[string]string +} + +// String returns canonical representation of ClusterMeta +func (c *ClusterMeta) String() string { + return fmt.Sprintf("%s.%s.eksctl.io", c.Name, c.Region) +} + +// LogString returns representation of ClusterMeta for logs +func (c *ClusterMeta) LogString() string { + return fmt.Sprintf("EKS cluster %q in %q region", c.Name, c.Region) +} + +// ClusterProvider is the interface to AWS APIs type ClusterProvider interface { CloudFormation() cloudformationiface.CloudFormationAPI EKS() eksiface.EKSAPI EC2() ec2iface.EC2API STS() stsiface.STSAPI + Region() string + Profile() string + WaitTimeout() time.Duration } -// ClusterConfig is a simple config, to be replaced with Cluster API -type ClusterConfig struct { +// ProviderConfig holds global parameters for all interactions with AWS APIs +type ProviderConfig struct { Region string Profile string - Tags map[string]string - ClusterName string - WaitTimeout time.Duration +} + +// ClusterConfig is a simple config, to be replaced with Cluster API +type ClusterConfig struct { + Metadata *ClusterMeta VPC *ClusterVPC @@ -78,7 +101,8 @@ type ClusterConfig struct { // call NewNodeGroup to create one func NewClusterConfig() *ClusterConfig { cfg := &ClusterConfig{ - VPC: &ClusterVPC{}, + Metadata: &ClusterMeta{}, + VPC: &ClusterVPC{}, } cidr := DefaultCIDR() @@ -97,16 +121,6 @@ func (c *ClusterConfig) AppendAvailabilityZone(newAZ string) { c.AvailabilityZones = append(c.AvailabilityZones, newAZ) } -// 
IsSupportedRegion check if given region is supported -func (c *ClusterConfig) IsSupportedRegion() bool { - for _, supportedRegion := range SupportedRegions() { - if c.Region == supportedRegion { - return true - } - } - return false -} - // NewNodeGroup crears new nodegroup inside cluster config, // it returns pointer to the nodegroup for convenience func (c *ClusterConfig) NewNodeGroup() *NodeGroup { diff --git a/pkg/eks/auth.go b/pkg/eks/auth.go index 130672bad6..ee63bbe4d5 100644 --- a/pkg/eks/auth.go +++ b/pkg/eks/auth.go @@ -28,8 +28,8 @@ import ( "github.com/weaveworks/eksctl/pkg/utils" ) -func (c *ClusterProvider) getKeyPairName(ng *api.NodeGroup, fingerprint *string) string { - keyNameParts := []string{"eksctl", c.Spec.ClusterName} +func (c *ClusterProvider) getKeyPairName(clusterName string, ng *api.NodeGroup, fingerprint *string) string { + keyNameParts := []string{"eksctl", clusterName} if ng != nil { keyNameParts = append(keyNameParts, fmt.Sprintf("ng%d", ng.ID)) } @@ -65,12 +65,12 @@ func (c *ClusterProvider) tryExistingSSHPublicKeyFromPath(ng *api.NodeGroup) err return nil } -func (c *ClusterProvider) importSSHPublicKeyIfNeeded(ng *api.NodeGroup) error { +func (c *ClusterProvider) importSSHPublicKeyIfNeeded(clusterName string, ng *api.NodeGroup) error { fingerprint, err := pki.ComputeAWSKeyFingerprint(string(ng.SSHPublicKey)) if err != nil { return err } - ng.SSHPublicKeyName = c.getKeyPairName(ng, &fingerprint) + ng.SSHPublicKeyName = c.getKeyPairName(clusterName, ng, &fingerprint) existing, err := c.getKeyPair(ng.SSHPublicKeyName) if err != nil { if strings.HasPrefix(err.Error(), "cannot find EC2 key pair") { @@ -94,7 +94,7 @@ func (c *ClusterProvider) importSSHPublicKeyIfNeeded(ng *api.NodeGroup) error { } // LoadSSHPublicKey loads the given SSH public key -func (c *ClusterProvider) LoadSSHPublicKey(ng *api.NodeGroup) error { +func (c *ClusterProvider) LoadSSHPublicKey(clusterName string, ng *api.NodeGroup) error { if !ng.AllowSSH { // TODO: https://github.com/weaveworks/eksctl/issues/144 return nil @@ -110,21 +110,21 @@ func (c *ClusterProvider) LoadSSHPublicKey(ng *api.NodeGroup) error { } // on successful read – import it ng.SSHPublicKey = sshPublicKey - if err := c.importSSHPublicKeyIfNeeded(ng); err != nil { + if err := c.importSSHPublicKeyIfNeeded(clusterName, ng); err != nil { return err } return nil } // MaybeDeletePublicSSHKey will delete the public SSH key, if it exists -func (c *ClusterProvider) MaybeDeletePublicSSHKey() { +func (c *ClusterProvider) MaybeDeletePublicSSHKey(clusterName string) { existing, err := c.Provider.EC2().DescribeKeyPairs(&ec2.DescribeKeyPairsInput{}) if err != nil { logger.Debug("cannot describe keys: %v", err) return } matching := []*string{} - prefix := c.getKeyPairName(nil, nil) + prefix := c.getKeyPairName(clusterName, nil, nil) logger.Debug("existing = %#v", existing) for _, e := range existing.KeyPairs { if strings.HasPrefix(*e.KeyName, prefix) { @@ -163,20 +163,22 @@ type ClientConfig struct { ContextName string roleARN string sts stsiface.STSAPI + profile string } // NewClientConfig creates a new client config // based on "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" // these are small, so we can copy these, and no need to deal with k/k as dependency -func (c *ClusterProvider) NewClientConfig() (*ClientConfig, error) { - client, clusterName, contextName := kubeconfig.New(c.Spec, c.getUsername(), "") +func (c *ClusterProvider) NewClientConfig(spec *api.ClusterConfig) (*ClientConfig, error) { + client, clusterName, contextName 
:= kubeconfig.New(spec, c.getUsername(), "") clientConfig := &ClientConfig{ - Cluster: c.Spec, + Cluster: spec, Client: client, ClusterName: clusterName, ContextName: contextName, roleARN: c.Status.iamRoleARN, sts: c.Provider.STS(), + profile: c.Provider.Profile(), } return clientConfig, nil @@ -189,11 +191,11 @@ func (c *ClientConfig) WithExecAuthenticator() *ClientConfig { kubeconfig.AppendAuthenticator(clientConfigCopy.Client, c.Cluster, utils.DetectAuthenticator()) - if len(c.Cluster.Profile) > 0 { + if len(c.profile) > 0 { clientConfigCopy.Client.AuthInfos[c.ContextName].Exec.Env = []clientcmdapi.ExecEnvVar{ clientcmdapi.ExecEnvVar{ Name: "AWS_PROFILE", - Value: c.Cluster.Profile, + Value: c.profile, }, } } @@ -210,7 +212,7 @@ func (c *ClientConfig) WithEmbeddedToken() (*ClientConfig, error) { return nil, errors.Wrap(err, "could not get token generator") } - tok, err := gen.GetWithSTS(c.Cluster.ClusterName, c.sts.(*sts.STS)) + tok, err := gen.GetWithSTS(c.Cluster.Metadata.Name, c.sts.(*sts.STS)) if err != nil { return nil, errors.Wrap(err, "could not get token") } diff --git a/pkg/eks/eks.go b/pkg/eks/eks.go index ae6c5d5349..43c69eea2f 100644 --- a/pkg/eks/eks.go +++ b/pkg/eks/eks.go @@ -19,15 +19,10 @@ import ( "github.com/kubicorn/kubicorn/pkg/logger" ) -type clusterWithRegion struct { - Name string - Region string -} - // DescribeControlPlane describes the cluster control plane -func (c *ClusterProvider) DescribeControlPlane() (*awseks.Cluster, error) { +func (c *ClusterProvider) DescribeControlPlane(cl *api.ClusterMeta) (*awseks.Cluster, error) { input := &awseks.DescribeClusterInput{ - Name: &c.Spec.ClusterName, + Name: &cl.Name, } output, err := c.Provider.EKS().DescribeCluster(input) if err != nil { @@ -37,8 +32,8 @@ func (c *ClusterProvider) DescribeControlPlane() (*awseks.Cluster, error) { } // DeprecatedDeleteControlPlane deletes the control plane -func (c *ClusterProvider) DeprecatedDeleteControlPlane() error { - cluster, err := c.DescribeControlPlane() +func (c *ClusterProvider) DeprecatedDeleteControlPlane(cl *api.ClusterMeta) error { + cluster, err := c.DescribeControlPlane(cl) if err != nil { return errors.Wrap(err, "not able to get control plane for deletion") } @@ -54,20 +49,20 @@ func (c *ClusterProvider) DeprecatedDeleteControlPlane() error { } // GetCredentials retrieves the certificate authority data -func (c *ClusterProvider) GetCredentials(cluster awseks.Cluster) error { - c.Spec.Endpoint = *cluster.Endpoint +func (c *ClusterProvider) GetCredentials(cluster awseks.Cluster, spec *api.ClusterConfig) error { + spec.Endpoint = *cluster.Endpoint data, err := base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data) if err != nil { return errors.Wrap(err, "decoding certificate authority data") } - c.Spec.CertificateAuthorityData = data + spec.CertificateAuthorityData = data return nil } // ListClusters display details of all the EKS cluster in your account -func (c *ClusterProvider) ListClusters(chunkSize int, output string, eachRegion bool) error { +func (c *ClusterProvider) ListClusters(clusterName string, chunkSize int, output string, eachRegion bool) error { // NOTE: this needs to be reworked in the future so that the functionality // is combined. This require the ability to return details of all clusters // in a single call. 
@@ -76,17 +71,17 @@ func (c *ClusterProvider) ListClusters(chunkSize int, output string, eachRegion return err } - if c.Spec.ClusterName != "" { + if clusterName != "" { if output == "table" { addSummaryTableColumns(printer.(*printers.TablePrinter)) } - return c.doGetCluster(&c.Spec.ClusterName, printer) + return c.doGetCluster(clusterName, printer) } if output == "table" { addListTableColumns(printer.(*printers.TablePrinter)) } - allClusters := []*clusterWithRegion{} + allClusters := []*api.ClusterMeta{} if err := c.doListClusters(int64(chunkSize), printer, &allClusters, eachRegion); err != nil { return err } @@ -105,12 +100,16 @@ func (c *ClusterProvider) getClustersRequest(chunkSize int64, nextToken string) return output.Clusters, output.NextToken, nil } -func (c *ClusterProvider) doListClusters(chunkSize int64, printer printers.OutputPrinter, allClusters *[]*clusterWithRegion, eachRegion bool) error { +func (c *ClusterProvider) doListClusters(chunkSize int64, printer printers.OutputPrinter, allClusters *[]*api.ClusterMeta, eachRegion bool) error { if eachRegion { // reset region and re-create the client, then make a recursive call for _, region := range api.SupportedRegions() { - c.Spec.Region = region - if err := New(c.Spec).doListClusters(chunkSize, printer, allClusters, false); err != nil { + spec := &api.ProviderConfig{ + Region: region, + Profile: c.Provider.Profile(), + WaitTimeout: c.Provider.WaitTimeout(), + } + if err := New(spec, nil).doListClusters(chunkSize, printer, allClusters, false); err != nil { return err } } @@ -125,9 +124,9 @@ func (c *ClusterProvider) doListClusters(chunkSize int64, printer printers.Outpu } for _, clusterName := range clusters { - *allClusters = append(*allClusters, &clusterWithRegion{ + *allClusters = append(*allClusters, &api.ClusterMeta{ Name: *clusterName, - Region: c.Spec.Region, + Region: c.Provider.Region(), }) } @@ -141,13 +140,13 @@ func (c *ClusterProvider) doListClusters(chunkSize int64, printer printers.Outpu return nil } -func (c *ClusterProvider) doGetCluster(clusterName *string, printer printers.OutputPrinter) error { +func (c *ClusterProvider) doGetCluster(clusterName string, printer printers.OutputPrinter) error { input := &awseks.DescribeClusterInput{ - Name: clusterName, + Name: &clusterName, } output, err := c.Provider.EKS().DescribeCluster(input) if err != nil { - return errors.Wrapf(err, "unable to describe control plane %q", *clusterName) + return errors.Wrapf(err, "unable to describe control plane %q", clusterName) } logger.Debug("cluster = %#v", output) @@ -159,9 +158,10 @@ func (c *ClusterProvider) doGetCluster(clusterName *string, printer printers.Out if *output.Cluster.Status == awseks.ClusterStatusActive { if logger.Level >= 4 { - stacks, err := c.NewStackManager().ListReadyStacks(fmt.Sprintf("^(eksclt|EKS)-%s-.*$", *clusterName)) + spec := &api.ClusterConfig{Metadata: &api.ClusterMeta{Name: clusterName}} + stacks, err := c.NewStackManager(spec).ListReadyStacks(fmt.Sprintf("^(eksclt|EKS)-%s-.*$", clusterName)) if err != nil { - return errors.Wrapf(err, "listing CloudFormation stack for %q", *clusterName) + return errors.Wrapf(err, "listing CloudFormation stack for %q", clusterName) } for _, s := range stacks { logger.Debug("stack = %#v", *s) @@ -178,7 +178,7 @@ func (c *ClusterProvider) ListAllTaggedResources() error { } // WaitForControlPlane waits till the control plane is ready -func (c *ClusterProvider) WaitForControlPlane(clientSet *kubernetes.Clientset) error { +func (c *ClusterProvider) WaitForControlPlane(id 
*api.ClusterMeta, clientSet *kubernetes.Clientset) error { if _, err := clientSet.ServerVersion(); err == nil { return nil } @@ -186,7 +186,7 @@ func (c *ClusterProvider) WaitForControlPlane(clientSet *kubernetes.Clientset) e ticker := time.NewTicker(20 * time.Second) defer ticker.Stop() - timer := time.NewTimer(c.Spec.WaitTimeout) + timer := time.NewTimer(c.Provider.WaitTimeout()) defer timer.Stop() for { @@ -198,7 +198,7 @@ func (c *ClusterProvider) WaitForControlPlane(clientSet *kubernetes.Clientset) e } logger.Debug("control plane not ready yet – %s", err.Error()) case <-timer.C: - return fmt.Errorf("timed out waiting for control plane %q after %s", c.Spec.ClusterName, c.Spec.WaitTimeout) + return fmt.Errorf("timed out waiting for control plane %q after %s", id.Name, c.Provider.WaitTimeout()) } } } @@ -240,10 +240,10 @@ func addSummaryTableColumns(printer *printers.TablePrinter) { } func addListTableColumns(printer *printers.TablePrinter) { - printer.AddColumn("NAME", func(c *clusterWithRegion) string { + printer.AddColumn("NAME", func(c *api.ClusterMeta) string { return c.Name }) - printer.AddColumn("REGION", func(c *clusterWithRegion) string { + printer.AddColumn("REGION", func(c *api.ClusterMeta) string { return c.Region }) } diff --git a/pkg/eks/eks_test.go b/pkg/eks/eks_test.go index 2b5ff5ef5a..3d382aecba 100644 --- a/pkg/eks/eks_test.go +++ b/pkg/eks/eks_test.go @@ -14,7 +14,6 @@ import ( . "github.com/onsi/gomega" "github.com/stretchr/testify/mock" . "github.com/weaveworks/eksctl/pkg/eks" - "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/testutils" ) @@ -42,9 +41,6 @@ var _ = Describe("Eks", func() { p = testutils.NewMockProvider() c = &ClusterProvider{ - Spec: &api.ClusterConfig{ - ClusterName: clusterName, - }, Provider: p, } @@ -61,7 +57,7 @@ var _ = Describe("Eks", func() { }) JustBeforeEach(func() { - err = c.ListClusters(100, output, false) + err = c.ListClusters(clusterName, 100, output, false) }) It("should not error", func() { @@ -92,7 +88,7 @@ var _ = Describe("Eks", func() { }) JustBeforeEach(func() { - err = c.ListClusters(100, output, false) + err = c.ListClusters(clusterName, 100, output, false) }) It("should not error", func() { @@ -129,9 +125,6 @@ var _ = Describe("Eks", func() { p = testutils.NewMockProvider() c = &ClusterProvider{ - Spec: &api.ClusterConfig{ - ClusterName: clusterName, - }, Provider: p, } @@ -143,7 +136,7 @@ var _ = Describe("Eks", func() { }) JustBeforeEach(func() { - err = c.ListClusters(100, output, false) + err = c.ListClusters(clusterName, 100, output, false) }) AfterEach(func() { @@ -199,11 +192,10 @@ var _ = Describe("Eks", func() { p = testutils.NewMockProvider() c = &ClusterProvider{ - Spec: &api.ClusterConfig{}, Provider: p, } - mockResultFn := func(input *awseks.ListClustersInput) *awseks.ListClustersOutput { + mockResultFn := func(_ *awseks.ListClustersInput) *awseks.ListClustersOutput { clusterName := fmt.Sprintf("cluster-%d", callNumber) output := &awseks.ListClustersOutput{ Clusters: []*string{aws.String(clusterName)}, @@ -212,7 +204,7 @@ var _ = Describe("Eks", func() { output.NextToken = aws.String("SOMERANDOMTOKEN") } - callNumber = callNumber + 1 + callNumber++ return output } @@ -222,7 +214,7 @@ var _ = Describe("Eks", func() { }) JustBeforeEach(func() { - err = c.ListClusters(chunkSize, output, false) + err = c.ListClusters("", chunkSize, output, false) }) It("should not error", func() { @@ -240,11 +232,10 @@ var _ = Describe("Eks", func() { p = testutils.NewMockProvider() c = &ClusterProvider{ 
- Spec: &api.ClusterConfig{}, Provider: p, } - mockResultFn := func(input *awseks.ListClustersInput) *awseks.ListClustersOutput { + mockResultFn := func(_ *awseks.ListClustersInput) *awseks.ListClustersOutput { output := &awseks.ListClustersOutput{ Clusters: []*string{aws.String("cluster-1"), aws.String("cluster-2")}, } @@ -257,7 +248,7 @@ var _ = Describe("Eks", func() { }) JustBeforeEach(func() { - err = c.ListClusters(chunkSize, output, false) + err = c.ListClusters("", chunkSize, output, false) }) It("should not error", func() { diff --git a/pkg/eks/nodegroup.go b/pkg/eks/nodegroup.go index ffe87afdec..8736de4e85 100644 --- a/pkg/eks/nodegroup.go +++ b/pkg/eks/nodegroup.go @@ -87,7 +87,7 @@ func (c *ClusterProvider) WaitForNodes(clientSet *clientset.Clientset, ng *api.N if ng.MinSize == 0 { return nil } - timer := time.After(c.Spec.WaitTimeout) + timer := time.After(c.Provider.WaitTimeout()) timeout := false watcher, err := clientSet.CoreV1().Nodes().Watch(metav1.ListOptions{}) if err != nil { @@ -120,7 +120,7 @@ func (c *ClusterProvider) WaitForNodes(clientSet *clientset.Clientset, ng *api.N } } if timeout { - return fmt.Errorf("timed out (after %s) waitiing for at least %d nodes to join the cluster and become ready", c.Spec.WaitTimeout, ng.MinSize) + return fmt.Errorf("timed out (after %s) waitiing for at least %d nodes to join the cluster and become ready", c.Provider.WaitTimeout(), ng.MinSize) } if _, err = getNodes(clientSet); err != nil { diff --git a/pkg/eks/vpc.go b/pkg/eks/vpc.go index c293ccdc9a..0f44583b7d 100644 --- a/pkg/eks/vpc.go +++ b/pkg/eks/vpc.go @@ -14,31 +14,31 @@ import ( // SetSubnets defines CIDRs for each of the subnets, // it must be called after SetAvailabilityZones -func (c *ClusterProvider) SetSubnets() error { +func (c *ClusterProvider) SetSubnets(spec *api.ClusterConfig) error { var err error - vpc := c.Spec.VPC + vpc := spec.VPC vpc.Subnets = map[api.SubnetTopology]map[string]api.Network{ api.SubnetTopologyPublic: map[string]api.Network{}, api.SubnetTopologyPrivate: map[string]api.Network{}, } - prefix, _ := c.Spec.VPC.CIDR.Mask.Size() + prefix, _ := spec.VPC.CIDR.Mask.Size() if (prefix < 16) || (prefix > 24) { return fmt.Errorf("VPC CIDR prefix must be betwee /16 and /24") } - zoneCIDRs, err := subnet.SplitInto8(c.Spec.VPC.CIDR) + zoneCIDRs, err := subnet.SplitInto8(spec.VPC.CIDR) if err != nil { return err } logger.Debug("VPC CIDR (%s) was divided into 8 subnets %v", vpc.CIDR.String(), zoneCIDRs) - zonesTotal := len(c.Spec.AvailabilityZones) + zonesTotal := len(spec.AvailabilityZones) if 2*zonesTotal > len(zoneCIDRs) { return fmt.Errorf("insufficient number of subnets (have %d, but need %d) for %d availability zones", len(zoneCIDRs), 2*zonesTotal, zonesTotal) } - for i, zone := range c.Spec.AvailabilityZones { + for i, zone := range spec.AvailabilityZones { public := zoneCIDRs[i] private := zoneCIDRs[i+zonesTotal] vpc.Subnets[api.SubnetTopologyPublic][zone] = api.Network{ @@ -54,7 +54,7 @@ func (c *ClusterProvider) SetSubnets() error { } // UseSubnets imports -func (c *ClusterProvider) UseSubnets(topology api.SubnetTopology, subnetIDs []string) error { +func (c *ClusterProvider) UseSubnets(spec *api.ClusterConfig, topology api.SubnetTopology, subnetIDs []string) error { if len(subnetIDs) == 0 { return nil } @@ -67,14 +67,14 @@ func (c *ClusterProvider) UseSubnets(topology api.SubnetTopology, subnetIDs []st } for _, subnet := range output.Subnets { - if c.Spec.VPC.ID == "" { - c.Spec.VPC.ID = *subnet.VpcId - } else if c.Spec.VPC.ID != *subnet.VpcId { + 
if spec.VPC.ID == "" { + spec.VPC.ID = *subnet.VpcId + } else if spec.VPC.ID != *subnet.VpcId { return fmt.Errorf("given subnets (%s) are not in the same VPC", strings.Join(subnetIDs, ", ")) } - c.Spec.ImportSubnet(topology, *subnet.AvailabilityZone, *subnet.SubnetId) - c.Spec.AppendAvailabilityZone(*subnet.AvailabilityZone) + spec.ImportSubnet(topology, *subnet.AvailabilityZone, *subnet.SubnetId) + spec.AppendAvailabilityZone(*subnet.AvailabilityZone) } return nil diff --git a/pkg/nodebootstrap/userdata.go b/pkg/nodebootstrap/userdata.go index 5e8689e2a7..4908fb18ea 100644 --- a/pkg/nodebootstrap/userdata.go +++ b/pkg/nodebootstrap/userdata.go @@ -106,8 +106,8 @@ func makeKubeletParams(spec *api.ClusterConfig, nodeGroupID int) []string { func makeMetadata(spec *api.ClusterConfig) []string { return []string{ - fmt.Sprintf("AWS_DEFAULT_REGION=%s", spec.Region), - fmt.Sprintf("AWS_EKS_CLUSTER_NAME=%s", spec.ClusterName), + fmt.Sprintf("AWS_DEFAULT_REGION=%s", spec.Metadata.Region), + fmt.Sprintf("AWS_EKS_CLUSTER_NAME=%s", spec.Metadata.Name), fmt.Sprintf("AWS_EKS_ENDPOINT=%s", spec.Endpoint), } } diff --git a/pkg/testutils/mock_provider.go b/pkg/testutils/mock_provider.go index fa90de496a..652fa1578f 100644 --- a/pkg/testutils/mock_provider.go +++ b/pkg/testutils/mock_provider.go @@ -1,10 +1,13 @@ package testutils import ( + "time" + "github.com/aws/aws-sdk-go/service/cloudformation/cloudformationiface" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/eks/eksiface" "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/weaveworks/eksctl/pkg/eks/api" "github.com/weaveworks/eksctl/pkg/eks/mocks" ) @@ -26,6 +29,13 @@ func NewMockProvider() *MockProvider { } } +// ProviderConfig holds current global config +var ProviderConfig = &api.ProviderConfig{ + Region: api.DefaultEKSRegion, + Profile: "default", + WaitTimeout: 1200000000000, +} + // CloudFormation returns a representation of the CloudFormation API func (m MockProvider) CloudFormation() cloudformationiface.CloudFormationAPI { return m.cfn } @@ -51,3 +61,12 @@ func (m MockProvider) STS() stsiface.STSAPI { return m.sts } // MockSTS returns a mocked STS API func (m MockProvider) MockSTS() *mocks.STSAPI { return m.STS().(*mocks.STSAPI) } + +// Profile returns current profile setting +func (m MockProvider) Profile() string { return ProviderConfig.Profile } + +// Region returns current region setting +func (m MockProvider) Region() string { return ProviderConfig.Region } + +// WaitTimeout returns current timeout setting +func (m MockProvider) WaitTimeout() time.Duration { return ProviderConfig.WaitTimeout } diff --git a/pkg/utils/kubeconfig/kubeconfig.go b/pkg/utils/kubeconfig/kubeconfig.go index a06186a17e..a07cd71f09 100644 --- a/pkg/utils/kubeconfig/kubeconfig.go +++ b/pkg/utils/kubeconfig/kubeconfig.go @@ -31,7 +31,7 @@ const ( // if certificateAuthorityPath is no empty, it is used instead of // embedded certificate-authority-data func New(spec *api.ClusterConfig, username, certificateAuthorityPath string) (*clientcmdapi.Config, string, string) { - clusterName := getCompleteClusterName(spec) + clusterName := spec.Metadata.String() contextName := fmt.Sprintf("%s@%s", username, clusterName) c := &clientcmdapi.Config{ @@ -66,7 +66,7 @@ func AppendAuthenticator(c *clientcmdapi.Config, spec *api.ClusterConfig, comman c.AuthInfos[c.CurrentContext].Exec = &clientcmdapi.ExecConfig{ APIVersion: "client.authentication.k8s.io/v1alpha1", Command: command, - Args: []string{"token", "-i", spec.ClusterName}, + 
Args: []string{"token", "-i", spec.Metadata.Name}, /* Args: []string{"token", "-i", c.Cluster.ClusterName, "-r", c.roleARN}, */ @@ -100,10 +100,6 @@ func Write(path string, newConfig *clientcmdapi.Config, setContext bool) (string return configAccess.GetDefaultFilename(), nil } -func getCompleteClusterName(spec *api.ClusterConfig) string { - return fmt.Sprintf("%s.%s.eksctl.io", spec.ClusterName, spec.Region) -} - func getConfigAccess(explicitPath string) clientcmd.ConfigAccess { pathOptions := clientcmd.NewDefaultPathOptions() if explicitPath != "" && explicitPath != DefaultPath { @@ -157,8 +153,8 @@ func isValidConfig(p, name string) error { } // MaybeDeleteConfig will delete the auto-generated kubeconfig, if it exists -func MaybeDeleteConfig(ctl *api.ClusterConfig) { - p := AutoPath(ctl.ClusterName) +func MaybeDeleteConfig(cl *api.ClusterMeta) { + p := AutoPath(cl.Name) autoConfExists, err := utils.FileExists(p) if err != nil { @@ -166,7 +162,7 @@ func MaybeDeleteConfig(ctl *api.ClusterConfig) { return } if autoConfExists { - if err = isValidConfig(p, ctl.ClusterName); err != nil { + if err = isValidConfig(p, cl.Name); err != nil { logger.Debug(err.Error()) return } @@ -183,7 +179,7 @@ func MaybeDeleteConfig(ctl *api.ClusterConfig) { return } - if !deleteClusterInfo(config, ctl) { + if !deleteClusterInfo(config, cl) { return } @@ -197,9 +193,9 @@ func MaybeDeleteConfig(ctl *api.ClusterConfig) { // deleteClusterInfo removes a cluster's information from the kubeconfig if the cluster name // provided by ctl matches a eksctl-created cluster in the kubeconfig // returns 'true' if the existing config has changes and 'false' otherwise -func deleteClusterInfo(existing *clientcmdapi.Config, ctl *api.ClusterConfig) bool { +func deleteClusterInfo(existing *clientcmdapi.Config, cl *api.ClusterMeta) bool { isChanged := false - clusterName := getCompleteClusterName(ctl) + clusterName := cl.String() if existing.Clusters[clusterName] != nil { delete(existing.Clusters, clusterName) diff --git a/pkg/utils/kubeconfig/kubeconfig_test.go b/pkg/utils/kubeconfig/kubeconfig_test.go index 0d1c42a84c..9808670fb8 100644 --- a/pkg/utils/kubeconfig/kubeconfig_test.go +++ b/pkg/utils/kubeconfig/kubeconfig_test.go @@ -131,10 +131,11 @@ var _ = Describe("Kubeconfig", func() { Context("delete config", func() { // Default cluster name is 'foo' and region is 'us-west-2' var apiClusterConfigSample = eksctlapi.ClusterConfig{ - Region: "us-west-2", - Profile: "", - Tags: map[string]string{}, - ClusterName: "foo", + Metadata: &eksctlapi.ClusterMeta{ + Region: "us-west-2", + Name: "foo", + Tags: map[string]string{}, + }, NodeGroups: []*eksctlapi.NodeGroup{ { AMI: "", @@ -160,7 +161,6 @@ var _ = Describe("Kubeconfig", func() { }, SecurityGroup: "", }, - WaitTimeout: 1200000000000, Endpoint: "", CertificateAuthorityData: []uint8(nil), ARN: "", @@ -179,7 +179,7 @@ var _ = Describe("Kubeconfig", func() { // Returns an ClusterConfig with a cluster name equal to the provided clusterName. 
GetClusterConfig := func(clusterName string) *eksctlapi.ClusterConfig { apiClusterConfig := apiClusterConfigSample - apiClusterConfig.ClusterName = clusterName + apiClusterConfig.Metadata.Name = clusterName return &apiClusterConfig } @@ -223,7 +223,7 @@ var _ = Describe("Kubeconfig", func() { It("removes a cluster from the kubeconfig if the kubeconfig file includes the cluster", func() { existingClusterConfig := GetClusterConfig("cluster-two") - kubeconfig.MaybeDeleteConfig(existingClusterConfig) + kubeconfig.MaybeDeleteConfig(existingClusterConfig.Metadata) configFileAsBytes, err := ioutil.ReadFile(configFile.Name()) Expect(err).To(BeNil()) @@ -232,7 +232,7 @@ var _ = Describe("Kubeconfig", func() { It("not change the kubeconfig if the kubeconfig does not include the cluster", func() { nonExistentClusterConfig := GetClusterConfig("not-a-cluster") - kubeconfig.MaybeDeleteConfig(nonExistentClusterConfig) + kubeconfig.MaybeDeleteConfig(nonExistentClusterConfig.Metadata) configFileAsBytes, err := ioutil.ReadFile(configFile.Name()) Expect(err).To(BeNil())