
Merge pull request #602 from giantswarm/add-azure-autoscaler
axbarsan committed Jan 12, 2021
2 parents 90d5e28 + f3a34d8 commit 1ffab2c
Showing 8 changed files with 45 additions and 46 deletions.
30 changes: 8 additions & 22 deletions commands/create/nodepool/command.go
@@ -51,7 +51,7 @@ follows:
- Availability zones: the node pool will use 1 zone selected randomly.
- Instance type (AWS) / VM Size (Azure): the default machine type of the installation will be
used. Check 'gsctl info' to find out what that is.
- Scaling settings: on AWS, the minimum will be 3 and maximum 10 nodes, and on Azure, the node count will be 3
- Scaling settings: the minimum will be 3 and the maximum 10 nodes.
Examples:
@@ -91,18 +91,12 @@ Examples:
# Node pool scaling:
# AWS
The initial node pool size is set by adjusting the lower and upper
size limit like this:
gsctl create nodepool f01r4 --nodes-min 3 --nodes-max 10
# Azure
The number of nodes is configured by setting both the lower and upper size limit to the same value:
gsctl create nodepool f01r4 --nodes-min 3 --nodes-max 3
# Spot instances (AWS only):
To use 50% spot instances in a node pool and making sure to always have
three on-demand instances you can create your node pool like this:
@@ -111,8 +105,6 @@ Examples:
--aws-on-demand-base-capacity 3 \
--aws-spot-percentage 50
# Spot instances (AWS only):
To use similar instances in your node pool to the one that you defined
you can create your node pool like this (the list is maintained by
Giant Swarm for now eg. if you select m5.xlarge the node pool can fall
@@ -305,24 +297,18 @@ func verifyPreconditions(args Arguments) error {
return microerror.Maskf(errors.ConflictingFlagsError, "the flags --aws-instance-type and --azure-vm-size cannot be combined.")
}

switch args.Provider {
case provider.AWS:
// Scaling flags plausibility
if args.ScalingMin > 0 && args.ScalingMax > 0 {
if args.ScalingMin > args.ScalingMax {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}
// Scaling flags plausibility.
if args.ScalingMin > 0 && args.ScalingMax > 0 {
if args.ScalingMin > args.ScalingMax {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}
}

if args.Provider == provider.AWS {
// SpotPercentage check percentage
if args.SpotPercentage < 0 || args.SpotPercentage > 100 {
return microerror.Mask(errors.NotPercentage)
}

case provider.Azure:
if args.ScalingMin != args.ScalingMax {
return microerror.Maskf(errors.WorkersMinMaxInvalidError, "Provider '%s' does not support node pool autoscaling.", args.Provider)
}
}

return nil
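For orientation, a minimal standalone sketch of the validation behaviour after this change: the min/max plausibility check now applies to every provider, while the spot-percentage check stays AWS-only. The helper name, signature, and plain errors below are illustrative only; the real checks live in verifyPreconditions and use this package's Arguments struct and microerror wrappers.

package main

import "fmt"

// validateScaling is a simplified stand-in for the post-change checks in
// commands/create/nodepool: scaling bounds are validated the same way for
// every provider, and only AWS additionally validates the spot percentage.
func validateScaling(providerName string, scalingMin, scalingMax, spotPercentage int64) error {
	if scalingMin > 0 && scalingMax > 0 && scalingMin > scalingMax {
		return fmt.Errorf("scaling minimum (%d) must not exceed maximum (%d)", scalingMin, scalingMax)
	}
	if providerName == "aws" && (spotPercentage < 0 || spotPercentage > 100) {
		return fmt.Errorf("spot instance percentage must be between 0 and 100, got %d", spotPercentage)
	}
	return nil
}

func main() {
	fmt.Println(validateScaling("azure", 3, 10, 0)) // <nil>: Azure node pools may now autoscale
	fmt.Println(validateScaling("azure", 3, 1, 0))  // error: minimum exceeds maximum
}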
2 changes: 1 addition & 1 deletion commands/create/nodepool/command_test.go
@@ -430,7 +430,7 @@ func TestVerifyPreconditions(t *testing.T) {
ClusterNameOrID: "cluster-id",
VmSize: "something-also-big",
ScalingMin: 3,
ScalingMax: 10,
ScalingMax: 1,
Provider: "azure",
},
errors.IsWorkersMinMaxInvalid,
7 changes: 7 additions & 0 deletions commands/list/nodepools/command.go
@@ -343,6 +343,7 @@ func getOutputAzure(nps []*models.V5GetNodePoolsResponseItems) (string, error) {
color.CyanString("NAME"),
color.CyanString("AZ"),
color.CyanString("VM SIZE"),
color.CyanString("NODES MIN/MAX"),
color.CyanString("NODES DESIRED"),
color.CyanString("NODES READY"),
color.CyanString("CPUS"),
@@ -389,11 +390,17 @@ }
}
}

scalingMin := int64(0)
if np.Scaling.Min != nil {
scalingMin = *np.Scaling.Min
}

table = append(table, strings.Join([]string{
np.ID,
np.Name,
formatting.AvailabilityZonesList(np.AvailabilityZones),
vmSizes,
fmt.Sprintf("%s/%s", strconv.FormatInt(scalingMin, 10), strconv.FormatInt(np.Scaling.Max, 10)),
strconv.FormatInt(np.Status.Nodes, 10),
formatNodesReady(np.Status.Nodes, np.Status.NodesReady),
sumCPUs,
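A short sketch of how the new NODES MIN/MAX column value is built, assuming a simplified scaling struct in place of the generated API model; Min is a pointer because the response may omit it, and, as in the diff above, a missing minimum falls back to 0.

package main

import (
	"fmt"
	"strconv"
)

// scaling is a stand-in for the relevant part of the node pool API response.
type scaling struct {
	Min *int64
	Max int64
}

// minMaxColumn renders the value shown in the NODES MIN/MAX column.
func minMaxColumn(s scaling) string {
	scalingMin := int64(0)
	if s.Min != nil {
		scalingMin = *s.Min
	}
	return fmt.Sprintf("%s/%s", strconv.FormatInt(scalingMin, 10), strconv.FormatInt(s.Max, 10))
}

func main() {
	one := int64(1)
	fmt.Println(minMaxColumn(scaling{Min: &one, Max: 2})) // 1/2
	fmt.Println(minMaxColumn(scaling{Max: 3}))            // 0/3
}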
16 changes: 8 additions & 8 deletions commands/list/nodepools/command_test.go
@@ -73,21 +73,21 @@ a6bf4 New node pool C m5.2xlarge false 0 100
},
{
npResponse: `[
{"id": "np001-1", "name": "np001-1", "availability_zones": ["2"], "scaling": {"min": -1, "max": -1}, "node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
{"id": "np001-1", "name": "np001-1", "availability_zones": ["2"], "scaling": {"min": 1, "max": 2}, "node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
]`,
outputFormat: "table",
output: `ID NAME AZ VM SIZE NODES DESIRED NODES READY CPUS RAM (GB)
np001-1 np001-1 2 Standard_D4s_v3 0 0 0 0.0`,
output: `ID NAME AZ VM SIZE NODES MIN/MAX NODES DESIRED NODES READY CPUS RAM (GB)
np001-1 np001-1 2 Standard_D4s_v3 1/2 0 0 0 0.0`,
},
{
npResponse: `[
{"id": "np002-1", "name": "np002-1", "availability_zones": ["1"], "scaling": {"min": -1, "max": -1}, "node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}},
{"id": "np002-2", "name": "np002-2", "availability_zones": ["2", "3"], "scaling": {"min": -1, "max": -1}, "node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
{"id": "np002-1", "name": "np002-1", "availability_zones": ["1"], "scaling": {"min": 1, "max": 2}, "node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}},
{"id": "np002-2", "name": "np002-2", "availability_zones": ["2", "3"], "scaling": {"min": 1, "max": 3}, "node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
]`,
outputFormat: "table",
output: `ID NAME AZ VM SIZE NODES DESIRED NODES READY CPUS RAM (GB)
np002-1 np002-1 1 Standard_D4s_v3 0 0 0 0.0
np002-2 np002-2 2,3 Standard_D4s_v3 0 0 0 0.0`,
output: `ID NAME AZ VM SIZE NODES MIN/MAX NODES DESIRED NODES READY CPUS RAM (GB)
np002-1 np002-1 1 Standard_D4s_v3 1/2 0 0 0 0.0
np002-2 np002-2 2,3 Standard_D4s_v3 1/3 0 0 0 0.0`,
},
{
npResponse: `[
14 changes: 14 additions & 0 deletions commands/show/nodepool/azure.go
@@ -41,6 +41,7 @@ func getOutputAzure(nodePool *models.V5GetNodePoolResponse) (string, error) {
table = append(table, color.YellowString("Name:")+"|"+nodePool.Name)
table = append(table, color.YellowString("Node VM sizes:")+"|"+formatVMSizeAzure(vmSizes, vmSizeDetails))
table = append(table, color.YellowString("Availability zones:")+"|"+formatAZsAzure(nodePool.AvailabilityZones))
table = append(table, color.YellowString("Node scaling:")+"|"+formatNodeScalingAzure(nodePool.Scaling))
table = append(table, color.YellowString("Nodes desired:")+fmt.Sprintf("|%d", nodePool.Status.Nodes))
table = append(table, color.YellowString("Nodes in state Ready:")+fmt.Sprintf("|%d", nodePool.Status.NodesReady))
table = append(table, color.YellowString("CPUs:")+"|"+formatCPUsAzure(nodePool.Status.NodesReady, vmSizeDetails))
@@ -61,6 +62,19 @@ func formatVMSizeAzure(vmSize string, details *nodespec.VMSize) string {
return fmt.Sprintf("%s %s", vmSize, color.RedString("(no information available on this vm size)"))
}

func formatNodeScalingAzure(scaling *models.V5GetNodePoolResponseScaling) string {
minScale := int64(0)
if scaling.Min != nil {
minScale = *scaling.Min
}

if minScale == scaling.Max {
return fmt.Sprintf("Pinned to %d", minScale)
}

return fmt.Sprintf("Autoscaling between %d and %d", minScale, scaling.Max)
}

func formatCPUsAzure(nodesReady int64, details *nodespec.VMSize) string {
if details != nil {
return strconv.FormatInt(nodesReady*details.NumberOfCores, 10)
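As a usage note on the new formatNodeScalingAzure helper: with the nil-safe default above, a node pool whose scaling block is {"min": 3, "max": 3} is rendered as "Pinned to 3", a block of {"min": 3, "max": 10} as "Autoscaling between 3 and 10", and a response without a minimum uses 0 as the lower bound.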
2 changes: 2 additions & 0 deletions commands/show/nodepool/command_test.go
@@ -48,6 +48,7 @@ func Test_ShowNodePool(t *testing.T) {
"name": "Application servers",
"availability_zones": ["1", "2"],
"node_spec": {"azure": {"vm_size": "Standard_D4s_v3"}},
"scaling":{"min":2,"max":5},
"status": {"nodes": 3, "nodes_ready": 3}
}`,
},
@@ -58,6 +59,7 @@
"name": "Application servers",
"availability_zones": ["1", "2"],
"node_spec": {"azure": {"vm_size": "weird_one"}},
"scaling":{"min":2,"max":5},
"status": {"nodes": 3, "nodes_ready": 3}
}`,
},
14 changes: 2 additions & 12 deletions commands/update/nodepool/command.go
@@ -13,7 +13,6 @@ import (
"github.com/spf13/cobra"

"github.com/giantswarm/gsctl/clustercache"
"github.com/giantswarm/gsctl/pkg/provider"

"github.com/giantswarm/gsctl/client"
"github.com/giantswarm/gsctl/commands/errors"
@@ -139,17 +138,8 @@ func verifyPreconditions(args Arguments) error {
if args.ScalingMin <= 0 && args.ScalingMax <= 0 && args.Name == "" {
return microerror.Maskf(errors.NoOpError, "Nothing to update.")
}

switch args.Provider {
case provider.AWS:
if args.ScalingMin > args.ScalingMax && args.ScalingMax > 0 {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}

case provider.Azure:
if args.ScalingMin != args.ScalingMax {
return microerror.Maskf(errors.WorkersMinMaxInvalidError, "Provider '%s' does not support node pool autoscaling.", args.Provider)
}
if args.ScalingMin > args.ScalingMax && args.ScalingMax > 0 {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}

return nil
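In effect, the update command now applies the same scaling check on every provider: differing minimum and maximum values are accepted on Azure as well, and only a minimum greater than a positive maximum is rejected, mirroring the create command above.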
6 changes: 3 additions & 3 deletions commands/update/nodepool/command_test.go
@@ -183,15 +183,15 @@ func Test_verifyPreconditions(t *testing.T) {
errorMatcher: errors.IsNoOpError,
},
{
name: "case 6: trying to provide unsupported arguments, on azure",
name: "case 6: bad scaling parameters, on azure",
args: Arguments{
AuthToken: "token",
APIEndpoint: "https://mock-url",
Provider: "azure",
ClusterNameOrID: "cluster-id",
NodePoolID: "abc",
ScalingMin: 1,
ScalingMax: 3,
ScalingMin: 3,
ScalingMax: 1,
},
errorMatcher: errors.IsWorkersMinMaxInvalid,
},