
Revert "Added support for autoscaling on azure (#588)" (#595)
This reverts commit b69d646
axbarsan committed Oct 7, 2020
1 parent aee69f2 commit 4970574
Showing 8 changed files with 47 additions and 37 deletions.
29 changes: 21 additions & 8 deletions commands/create/nodepool/command.go
@@ -91,12 +91,18 @@ Examples:
# Node pool scaling:
# AWS
The initial node pool size is set by adjusting the lower and upper
size limit like this:
gsctl create nodepool f01r4 --nodes-min 3 --nodes-max 10
# Spot instances (AWS only):
# Azure
The number of nodes is configured by setting both the lower and upper size limit to the same value:
gsctl create nodepool f01r4 --nodes-min 3 --nodes-max 3
To use 50% spot instances in a node pool while making sure to always have
three on-demand instances, you can create your node pool like this:
@@ -105,6 +111,8 @@ Examples:
--aws-on-demand-base-capacity 3 \
--aws-spot-percentage 50
# Spot instances (AWS only):
To use instances in your node pool that are similar to the one you defined,
you can create your node pool like this (the list is maintained by
Giant Swarm for now, e.g. if you select m5.xlarge the node pool can fall
@@ -286,19 +294,24 @@ func verifyPreconditions(args Arguments) error {
return microerror.Maskf(errors.ConflictingFlagsError, "the flags --aws-instance-type and --azure-vm-size cannot be combined.")
}

// Scaling flags plausibility
if args.ScalingMin > 0 && args.ScalingMax > 0 {
if args.ScalingMin > args.ScalingMax {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}
}

switch args.Provider {
case provider.AWS:
// Scaling flags plausibility
if args.ScalingMin > 0 && args.ScalingMax > 0 {
if args.ScalingMin > args.ScalingMax {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}
}

// SpotPercentage check percentage
if args.SpotPercentage < 0 || args.SpotPercentage > 100 {
return microerror.Mask(errors.NotPercentage)
}

case provider.Azure:
if args.ScalingMin != args.ScalingMax {
return microerror.Maskf(errors.WorkersMinMaxInvalidError, "Provider '%s' does not support node pool autoscaling.", args.Provider)
}
}

return nil
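For context, the create-path validation that this revert restores can be sketched as a small, self-contained Go program. The names below (scalingArgs, errWorkersMinMaxInvalid, checkScaling) are illustrative stand-ins, not the actual gsctl types:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Illustrative stand-ins for gsctl's Arguments and error types.
    type scalingArgs struct {
    	Provider   string
    	ScalingMin int64
    	ScalingMax int64
    }

    var errWorkersMinMaxInvalid = errors.New("min/max worker count is invalid")

    // checkScaling mirrors the restored rule: on AWS a min < max range is
    // accepted (autoscaling), on Azure min and max must be equal because
    // node pool autoscaling is not supported there.
    func checkScaling(a scalingArgs) error {
    	switch a.Provider {
    	case "aws":
    		if a.ScalingMin > 0 && a.ScalingMax > 0 && a.ScalingMin > a.ScalingMax {
    			return errWorkersMinMaxInvalid
    		}
    	case "azure":
    		if a.ScalingMin != a.ScalingMax {
    			return fmt.Errorf("provider %q does not support node pool autoscaling: %w", a.Provider, errWorkersMinMaxInvalid)
    		}
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkScaling(scalingArgs{Provider: "aws", ScalingMin: 3, ScalingMax: 10}))   // <nil>
    	fmt.Println(checkScaling(scalingArgs{Provider: "azure", ScalingMin: 3, ScalingMax: 10})) // error
    	fmt.Println(checkScaling(scalingArgs{Provider: "azure", ScalingMin: 3, ScalingMax: 3}))  // <nil>
    }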
2 changes: 1 addition & 1 deletion commands/create/nodepool/command_test.go
@@ -417,7 +417,7 @@ func TestVerifyPreconditions(t *testing.T) {
ClusterNameOrID: "cluster-id",
VmSize: "something-also-big",
ScalingMin: 3,
ScalingMax: 1,
ScalingMax: 10,
Provider: "azure",
},
errors.IsWorkersMinMaxInvalid,
2 changes: 0 additions & 2 deletions commands/list/nodepools/command.go
@@ -338,7 +338,6 @@ func getOutputAzure(nps []*models.V5GetNodePoolsResponseItems) (string, error) {
color.CyanString("NAME"),
color.CyanString("AZ"),
color.CyanString("VM SIZE"),
color.CyanString("NODES MIN/MAX"),
color.CyanString("NODES DESIRED"),
color.CyanString("NODES READY"),
color.CyanString("CPUS"),
@@ -390,7 +389,6 @@
np.Name,
formatting.AvailabilityZonesList(np.AvailabilityZones),
vmSizes,
strconv.FormatInt(np.Scaling.Min, 10) + "/" + strconv.FormatInt(np.Scaling.Max, 10),
strconv.FormatInt(np.Status.Nodes, 10),
formatNodesReady(np.Status.Nodes, np.Status.NodesReady),
sumCPUs,
16 changes: 8 additions & 8 deletions commands/list/nodepools/command_test.go
@@ -73,21 +73,21 @@ a6bf4 New node pool C m5.2xlarge false 0 100
},
{
npResponse: `[
{"id": "np001-1", "name": "np001-1", "availability_zones": ["2"], "scaling": {"min": 1, "max": 2}, "node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
{"id": "np001-1", "name": "np001-1", "availability_zones": ["2"], "scaling": {"min": -1, "max": -1}, "node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
]`,
outputFormat: "table",
output: `ID NAME AZ VM SIZE NODES MIN/MAX NODES DESIRED NODES READY CPUS RAM (GB)
np001-1 np001-1 2 Standard_D2s_v3 1/2 0 0 0 0.0`,
output: `ID NAME AZ VM SIZE NODES DESIRED NODES READY CPUS RAM (GB)
np001-1 np001-1 2 Standard_D2s_v3 0 0 0 0.0`,
},
{
npResponse: `[
{"id": "np002-1", "name": "np002-1", "availability_zones": ["1"], "scaling": {"min": 1, "max": 2}, "node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}},
{"id": "np002-2", "name": "np002-2", "availability_zones": ["2", "3"], "scaling": {"min": 1, "max": 3}, "node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
{"id": "np002-1", "name": "np002-1", "availability_zones": ["1"], "scaling": {"min": -1, "max": -1}, "node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}},
{"id": "np002-2", "name": "np002-2", "availability_zones": ["2", "3"], "scaling": {"min": -1, "max": -1}, "node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}, "volume_sizes_gb": {"docker": 50, "kubelet": 100}}, "status": {"nodes": 0, "nodes_ready": 0}}
]`,
outputFormat: "table",
output: `ID NAME AZ VM SIZE NODES MIN/MAX NODES DESIRED NODES READY CPUS RAM (GB)
np002-1 np002-1 1 Standard_D2s_v3 1/2 0 0 0 0.0
np002-2 np002-2 2,3 Standard_D2s_v3 1/3 0 0 0 0.0`,
output: `ID NAME AZ VM SIZE NODES DESIRED NODES READY CPUS RAM (GB)
np002-1 np002-1 1 Standard_D2s_v3 0 0 0 0.0
np002-2 np002-2 2,3 Standard_D2s_v3 0 0 0 0.0`,
},
{
npResponse: `[
9 changes: 0 additions & 9 deletions commands/show/nodepool/azure.go
@@ -41,7 +41,6 @@ func getOutputAzure(nodePool *models.V5GetNodePoolResponse) (string, error) {
table = append(table, color.YellowString("Name:")+"|"+nodePool.Name)
table = append(table, color.YellowString("Node VM sizes:")+"|"+formatVMSizeAzure(vmSizes, vmSizeDetails))
table = append(table, color.YellowString("Availability zones:")+"|"+formatAZsAzure(nodePool.AvailabilityZones))
table = append(table, color.YellowString("Node scaling:")+"|"+formatNodeScalingAzure(nodePool.Scaling))
table = append(table, color.YellowString("Nodes desired:")+fmt.Sprintf("|%d", nodePool.Status.Nodes))
table = append(table, color.YellowString("Nodes in state Ready:")+fmt.Sprintf("|%d", nodePool.Status.NodesReady))
table = append(table, color.YellowString("CPUs:")+"|"+formatCPUsAzure(nodePool.Status.NodesReady, vmSizeDetails))
@@ -70,14 +69,6 @@ func formatCPUsAzure(nodesReady int64, details *nodespec.VMSize) string {
return "n/a"
}

func formatNodeScalingAzure(scaling *models.V5GetNodePoolResponseScaling) string {
if scaling.Min == scaling.Max {
return fmt.Sprintf("Pinned to %d", scaling.Min)
}

return fmt.Sprintf("Autoscaling between %d and %d", scaling.Min, scaling.Max)
}

func formatRAMAzure(nodesReady int64, details *nodespec.VMSize) string {
if details != nil {
return strconv.FormatFloat(float64(nodesReady)*details.MemoryInMB/1000, 'f', 1, 64)
4 changes: 1 addition & 3 deletions commands/show/nodepool/command_test.go
@@ -47,8 +47,7 @@ func Test_ShowNodePool(t *testing.T) {
"id": "nodepool-id",
"name": "Application servers",
"availability_zones": ["1", "2"],
"node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}},
"scaling":{"min":2,"max":5},
"node_spec": {"azure": {"vm_size": "Standard_D2s_v3"}},
"status": {"nodes": 3, "nodes_ready": 3}
}`,
},
@@ -59,7 +58,6 @@
"name": "Application servers",
"availability_zones": ["1", "2"],
"node_spec": {"azure": {"vm_size": "weird_one"}},
"scaling":{"min":2,"max":5},
"status": {"nodes": 3, "nodes_ready": 3}
}`,
},
16 changes: 13 additions & 3 deletions commands/update/nodepool/command.go
@@ -12,8 +12,10 @@ import (
"github.com/giantswarm/microerror"
"github.com/spf13/cobra"

"github.com/giantswarm/gsctl/client"
"github.com/giantswarm/gsctl/clustercache"
"github.com/giantswarm/gsctl/pkg/provider"

"github.com/giantswarm/gsctl/client"
"github.com/giantswarm/gsctl/commands/errors"
"github.com/giantswarm/gsctl/flags"
)
@@ -136,8 +138,16 @@ func verifyPreconditions(args Arguments) error {
return microerror.Maskf(errors.NoOpError, "Nothing to update.")
}

if args.ScalingMin > args.ScalingMax && args.ScalingMax > 0 {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
switch args.Provider {
case provider.AWS:
if args.ScalingMin > args.ScalingMax && args.ScalingMax > 0 {
return microerror.Mask(errors.WorkersMinMaxInvalidError)
}

case provider.Azure:
if args.ScalingMin != args.ScalingMax {
return microerror.Maskf(errors.WorkersMinMaxInvalidError, "Provider '%s' does not support node pool autoscaling.", args.Provider)
}
}

return nil
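The update path restored here differs slightly from the create path: on AWS the check only fires when both bounds are set (a zero --nodes-max is tolerated, so you can update just the minimum), while on Azure any mismatch between min and max is rejected. A rough sketch under the same simplified, non-gsctl types as above:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errWorkersMinMaxInvalid = errors.New("min/max worker count is invalid")

    // checkUpdateScaling mirrors the restored update-path rule: AWS only fails
    // when both bounds are set and min exceeds max; Azure requires min == max.
    func checkUpdateScaling(provider string, scalingMin, scalingMax int64) error {
    	switch provider {
    	case "aws":
    		if scalingMin > scalingMax && scalingMax > 0 {
    			return errWorkersMinMaxInvalid
    		}
    	case "azure":
    		if scalingMin != scalingMax {
    			return errWorkersMinMaxInvalid
    		}
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkUpdateScaling("aws", 5, 0))   // <nil>: only the minimum was given
    	fmt.Println(checkUpdateScaling("azure", 1, 3)) // error: autoscaling not supported
    }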
6 changes: 3 additions & 3 deletions commands/update/nodepool/command_test.go
@@ -180,15 +180,15 @@ func Test_verifyPreconditions(t *testing.T) {
errorMatcher: errors.IsNoOpError,
},
{
name: "case 6: bad scaling parameters, on azure",
name: "case 6: trying to provide unsupported arguments, on azure",
args: Arguments{
AuthToken: "token",
APIEndpoint: "https://mock-url",
Provider: "azure",
ClusterNameOrID: "cluster-id",
NodePoolID: "abc",
ScalingMin: 3,
ScalingMax: 1,
ScalingMin: 1,
ScalingMax: 3,
},
errorMatcher: errors.IsWorkersMinMaxInvalid,
},
