diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf
index 75210c00ec..c28e9af344 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/examples/eks_managed_node_group/main.tf
@@ -264,6 +264,27 @@ module "eks" {
         additional = aws_iam_policy.node_additional.arn
       }
 
+      schedules = {
+        scale-up = {
+          min_size     = 2
+          max_size     = "-1" # Retains current max size
+          desired_size = 2
+          start_time   = "2023-03-05T00:00:00Z"
+          end_time     = "2024-03-05T00:00:00Z"
+          time_zone    = "Etc/GMT+0"
+          recurrence   = "0 0 * * *"
+        },
+        scale-down = {
+          min_size     = 0
+          max_size     = "-1" # Retains current max size
+          desired_size = 0
+          start_time   = "2023-03-05T12:00:00Z"
+          end_time     = "2024-03-05T12:00:00Z"
+          time_zone    = "Etc/GMT+0"
+          recurrence   = "0 12 * * *"
+        }
+      }
+
       tags = {
         ExtraTag = "EKS managed node group complete example"
       }
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index d94f815075..4bf3434663 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -82,6 +82,7 @@ module "eks_managed_node_group" {
 
 | Name | Type |
 |------|------|
+| [aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
 | [aws_eks_node_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
 | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
 | [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
@@ -113,6 +114,7 @@ module "eks_managed_node_group" {
 | [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
 | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
 | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
+| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
 | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no |
 | [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no |
 | [disable\_api\_termination](#input\_disable\_api\_termination) | If true, enables EC2 instance termination protection | `bool` | `null` | no |
@@ -159,6 +161,7 @@ module "eks_managed_node_group" {
 | [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | `map(string)` | `{}` | no |
 | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no |
 | [remote\_access](#input\_remote\_access) | Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false` | `any` | `{}` | no |
+| [schedules](#input\_schedules) | Map of autoscaling group schedule to create | `map(any)` | `{}` | no |
 | [subnet\_ids](#input\_subnet\_ids) | Identifiers of EC2 Subnets to associate with the EKS Node Group. These subnets must have the following resource tag: `kubernetes.io/cluster/CLUSTER_NAME` | `list(string)` | `null` | no |
 | [tag\_specifications](#input\_tag\_specifications) | The tags to apply to the resources during launch | `list(string)` | <pre>[<br>  "instance",<br>  "volume",<br>  "network-interface"<br>]</pre> | no |
 | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no |
@@ -175,6 +178,7 @@ module "eks_managed_node_group" {
 
 | Name | Description |
 |------|-------------|
+| [autoscaling\_group\_schedule\_arns](#output\_autoscaling\_group\_schedule\_arns) | ARNs of autoscaling group schedules |
 | [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role |
 | [iam\_role\_name](#output\_iam\_role\_name) | The name of the IAM role |
 | [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf
index dba2b58b5a..1d3fe81698 100644
--- a/modules/eks-managed-node-group/main.tf
+++ b/modules/eks-managed-node-group/main.tf
@@ -447,3 +447,25 @@ resource "aws_iam_role_policy_attachment" "additional" {
   policy_arn = each.value
   role       = aws_iam_role.this[0].name
 }
+
+################################################################################
+# Autoscaling Group Schedule
+################################################################################
+
+resource "aws_autoscaling_schedule" "this" {
+  for_each = { for k, v in var.schedules : k => v if var.create && var.create_schedule }
+
+  scheduled_action_name  = each.key
+  autoscaling_group_name = aws_eks_node_group.this[0].resources[0].autoscaling_groups[0].name
+
+  min_size         = try(each.value.min_size, null)
+  max_size         = try(each.value.max_size, null)
+  desired_capacity = try(each.value.desired_size, null)
+  start_time       = try(each.value.start_time, null)
+  end_time         = try(each.value.end_time, null)
+  time_zone        = try(each.value.time_zone, null)
+
+  # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]
+  # Cron examples: https://crontab.guru/examples.html
+  recurrence = try(each.value.recurrence, null)
+}
diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf
index 0475953452..012cd46f4e 100644
--- a/modules/eks-managed-node-group/outputs.tf
+++ b/modules/eks-managed-node-group/outputs.tf
@@ -61,6 +61,15 @@ output "node_group_taints" {
   value       = try(aws_eks_node_group.this[0].taint, [])
 }
 
+################################################################################
+# Autoscaling Group Schedule
+################################################################################
+
+output "autoscaling_group_schedule_arns" {
+  description = "ARNs of autoscaling group schedules"
+  value       = { for k, v in aws_autoscaling_schedule.this : k => v.arn }
+}
+
 ################################################################################
 # IAM Role
 ################################################################################
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 36645e0d90..197cd28c95 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -469,3 +469,19 @@ variable "iam_role_tags" {
   type        = map(string)
   default     = {}
 }
+
+################################################################################
+# Autoscaling Group Schedule
+################################################################################
+
+variable "create_schedule" {
+  description = "Determines whether to create autoscaling group schedule or not"
+  type        = bool
+  default     = true
+}
+
+variable "schedules" {
+  description = "Map of autoscaling group schedule to create"
+  type        = map(any)
+  default     = {}
+}
diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf
index 6ec1534bd1..9b72f49a9f 100644
--- a/modules/self-managed-node-group/main.tf
+++ b/modules/self-managed-node-group/main.tf
@@ -680,7 +680,7 @@ resource "aws_autoscaling_group" "this" {
 ################################################################################
 
 resource "aws_autoscaling_schedule" "this" {
-  for_each = var.create && var.create_schedule ? var.schedules : {}
+  for_each = { for k, v in var.schedules : k => v if var.create && var.create_schedule }
 
   scheduled_action_name  = each.key
   autoscaling_group_name = aws_autoscaling_group.this[0].name
diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf
index c9c9b75254..5c83497218 100644
--- a/modules/self-managed-node-group/outputs.tf
+++ b/modules/self-managed-node-group/outputs.tf
@@ -82,7 +82,7 @@ output "autoscaling_group_vpc_zone_identifier" {
 }
 
 ################################################################################
-# autoscaling group schedule
+# Autoscaling Group Schedule
 ################################################################################
 
 output "autoscaling_group_schedule_arns" {
diff --git a/node_groups.tf b/node_groups.tf
index 0dacc1ed23..db78861a81 100644
--- a/node_groups.tf
+++ b/node_groups.tf
@@ -363,6 +363,9 @@ module "eks_managed_node_group" {
   # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031
   iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.eks_managed_node_group_defaults, "iam_role_additional_policies", {}))
 
+  create_schedule = try(each.value.create_schedule, var.eks_managed_node_group_defaults.create_schedule, true)
+  schedules       = try(each.value.schedules, var.eks_managed_node_group_defaults.schedules, {})
+
   # Security group
   vpc_security_group_ids            = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])))
   cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.eks_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null
@@ -426,7 +429,7 @@ module "self_managed_node_group" {
   mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null)
   warm_pool              = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {})
 
-  create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, false)
+  create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, true)
   schedules       = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {})
 
   delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null)
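
Usage note for reviewers: the sketch below shows how the new `create_schedule`/`schedules` inputs are expected to flow through `node_groups.tf` into the EKS managed node group submodule. It is a minimal, illustrative consumer-side example, not part of this change — the cluster name, Kubernetes version, network IDs, and version pin are placeholders.

```hcl
# Minimal consumer-side sketch (illustrative values only)
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = ">= 19.0" # placeholder; pin to a release that includes this change

  cluster_name    = "example"      # placeholder
  cluster_version = "1.24"         # placeholder
  vpc_id          = "vpc-12345678" # placeholder
  subnet_ids      = ["subnet-abcde012", "subnet-bcde012a"] # placeholders

  eks_managed_node_groups = {
    default = {
      min_size     = 1
      max_size     = 3
      desired_size = 1

      # New input added by this change; defaults to true
      create_schedule = true

      # Each key becomes the scheduled action name; attributes are optional and
      # fall back to null via try() in the submodule
      schedules = {
        scale-down = {
          min_size     = 0
          max_size     = "-1" # Retains current max size
          desired_size = 0
          time_zone    = "Etc/GMT+0"
          recurrence   = "0 12 * * *"
        }
      }
    }
  }
}
```

Schedules can also be set once for all groups via `eks_managed_node_group_defaults`, since `node_groups.tf` falls back to `var.eks_managed_node_group_defaults.schedules` before the empty-map default.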