diff --git a/.gitignore b/.gitignore
index 5c2a2a8..4006a71 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,9 @@
 .idea/
 .infracost/
 node_modules/
+venv/
 .terraform.lock.hcl
 *.tfstate*
+
+builds/
diff --git a/README.md b/README.md
index c894a3c..9572a0c 100644
--- a/README.md
+++ b/README.md
@@ -12,18 +12,38 @@ can be realized by the user by creating multiple connections to the bastion host
 
 Check the `examples` directory for the module usage.
 
-## Cost Estimation (for version 1.9.1)
+## Cost Estimation (for version 2.4.0)
 
 ```text
- Name                                                       Monthly Qty  Unit   Monthly Cost
-
- module.bastion_host.aws_autoscaling_group.on_spot[0]
+Name                                                        Monthly Qty  Unit   Monthly Cost
+module.bastion_host.aws_autoscaling_group.on_spot[0]
 └─ module.bastion_host.aws_launch_template.manual_start
-    └─ Instance usage (Linux/UNIX, spot, t3.nano)                 1,460  hours         $2.63
-    └─ root_block_device
-        └─ Storage (general purpose SSD, gp3)                        16  GB            $1.52
-
- OVERALL TOTAL                                                                         $4.15
+    ├─ Instance usage (Linux/UNIX, on-demand, t3.nano)              730  hours         $4.38
+    └─ root_block_device
+        └─ Storage (general purpose SSD, gp3)                        16  GB            $1.52
+    └─ Instance usage (Linux/UNIX, spot, t3.nano)                   730  hours         $1.31
+    └─ root_block_device
+        └─ Storage (general purpose SSD, gp3)                        16  GB            $1.52
+
+ module.bastion_host.aws_cloudwatch_log_group.panic_button_off
+ ├─ Data ingested                        Monthly cost depends on usage: $0.63 per GB
+ ├─ Archival Storage                     Monthly cost depends on usage: $0.0324 per GB
+ └─ Insights queries data scanned        Monthly cost depends on usage: $0.0063 per GB
+
+ module.bastion_host.aws_cloudwatch_log_group.panic_button_on
+ ├─ Data ingested                        Monthly cost depends on usage: $0.63 per GB
+ ├─ Archival Storage                     Monthly cost depends on usage: $0.0324 per GB
+ └─ Insights queries data scanned        Monthly cost depends on usage: $0.0063 per GB
+
+ module.bastion_host.aws_lambda_function.panic_button_off
+ ├─ Requests                             Monthly cost depends on usage: $0.20 per 1M requests
+ └─ Duration                             Monthly cost depends on usage: $0.0000166667 per GB-second
+
+ module.bastion_host.aws_lambda_function.panic_button_on
+ ├─ Requests                             Monthly cost depends on usage: $0.20 per 1M requests
+ └─ Duration                             Monthly cost depends on usage: $0.0000166667 per GB-second
+
+ OVERALL TOTAL                                                                         $8.73
 ```
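+
+The table above looks like [Infracost](https://www.infracost.io/) output; an estimate like this can be re-created
+with a command such as `infracost breakdown --path <example directory>` (the path is a placeholder, and the exact
+figures vary with region and usage).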
 
 ## Features
@@ -34,6 +54,15 @@ Check the `examples` directory for the module usage.
 - use spot instances to save some money
 - provide IAM role for easy access
 - provide a script to connect to the bastion from your local machine
+- panic switch to enable or disable the bastion hosts immediately
+
+### Panic Switch
+
+Two Lambda functions are provided: one enables the bastion hosts on demand, e.g. when you have to work at night
+while the hosts are deactivated by schedule; the other disables the bastion hosts immediately, no matter what.
+
+As both functions are destructive (they modify the auto scaling group), re-apply this module as soon as possible
+afterwards to restore the auto scaling settings (especially the schedules).
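+
+To flip a switch, invoke the corresponding Lambda function manually, e.g. with the AWS CLI:
+`aws lambda invoke --function-name bastion-panic-button-off response.json`. The function names depend on your
+`resource_names` settings; `bastion-panic-button-off` and `bastion-panic-button-on` assume the defaults.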
 
 ### Keepass Support For IAM User Credentials
 
@@ -197,13 +226,14 @@ way you can access the database, Redis cluster, ... directly from your localhost
 
 | Name | Version |
 |------|---------|
+| [archive](#provider\_archive) | 2.3.0 |
 | [aws](#provider\_aws) | 4.24.0 |
 
 ## Modules
 
 | Name | Source | Version |
 |------|--------|---------|
-| [instance\_profile\_role](#module\_instance\_profile\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role | 5.2.0 |
+| [instance\_profile\_role](#module\_instance\_profile\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role | 5.11.1 |
 
 ## Resources
 
@@ -217,26 +247,36 @@ way you can access the database, Redis cluster, ... directly from your localhost
 | [aws_autoscaling_schedule.on_spot_down](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
 | [aws_autoscaling_schedule.on_spot_up](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource |
 | [aws_iam_policy.access_bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.lambda_switch_off](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
 | [aws_iam_role.access_bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role.lambda_switch_off](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
 | [aws_iam_role_policy_attachment.access_bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.lambda_switch_off](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_lambda_function.panic_button_switch_off](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function) | resource |
 | [aws_launch_configuration.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration) | resource |
 | [aws_launch_template.manual_start](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
 | [aws_security_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
 | [aws_security_group_rule.egress_open_ports](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
 | [aws_security_group_rule.egress_ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
+| [archive_file.panic_button_lambda_switch_off](https://registry.terraform.io/providers/hashicorp/archive/latest/docs/data-sources/file) | data source |
 | [aws_ami.latest_amazon_linux](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
 | [aws_iam_policy_document.access_bastion](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.lambda_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.lambda_switch_off](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
 | [aws_region.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
 
 ## Inputs
 
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
+| [ami\_name\_filter](#input\_ami\_name\_filter) | The search filter string for the bastion AMI. | `string` | `"amzn2-ami-hvm-*-x86_64-ebs"` | no |
 | [bastion\_access\_tag\_value](#input\_bastion\_access\_tag\_value) | Value added as tag 'bastion-access' of the launched EC2 instance, used to restrict access to the machine via IAM. | `string` | `"developer"` | no |
 | [egress\_open\_tcp\_ports](#input\_egress\_open\_tcp\_ports) | The list of TCP ports to open for outgoing traffic. | `list(number)` | n/a | yes |
-| [iam\_role\_path](#input\_iam\_role\_path) | Role path for the created bastion instance profile. Must end with '/' | `string` | `"/"` | no |
-| [iam\_user\_arns](#input\_iam\_user\_arns) | ARNs of the user who are allowed to assume the role giving access to the bastion host. | `list(string)` | n/a | yes |
-| [instance](#input\_instance) | Defines the basic parameters for the EC2 instance used as Bastion host | <pre>object({<br>    type = string # EC2 instance type<br>    desired_capacity = number # number of EC2 instances to run<br>    root_volume_size = number # in GB<br>    enable_monitoring = bool<br><br>    enable_spot = bool<br>  })</pre> | <pre>{<br>  "desired_capacity": 1,<br>  "enable_monitoring": false,<br>  "enable_spot": false,<br>  "root_volume_size": 8,<br>  "type": "t3.nano"<br>}</pre> | no |
+| [iam\_role\_path](#input\_iam\_role\_path) | Role path for the created bastion instance profile. Must end with '/'. Not used if instance["profile\_name"] is set. | `string` | `"/"` | no |
+| [iam\_user\_arns](#input\_iam\_user\_arns) | ARNs of the users who are allowed to assume the role giving access to the bastion host. Not used if instance["profile\_name"] is set. | `list(string)` | n/a | yes |
+| [instance](#input\_instance) | Defines the basic parameters for the EC2 instance used as Bastion host | <pre>object({<br>    type = string # EC2 instance type<br>    desired_capacity = number # number of EC2 instances to run<br>    root_volume_size = number # in GB<br>    enable_monitoring = bool<br><br>    enable_spot = bool<br><br>    profile_name = string<br>  })</pre> | <pre>{<br>  "desired_capacity": 1,<br>  "enable_monitoring": false,<br>  "enable_spot": false,<br>  "profile_name": "",<br>  "root_volume_size": 8,<br>  "type": "t3.nano"<br>}</pre> | no |
+| [instances\_distribution](#input\_instances\_distribution) | Defines the parameters for the mixed instances policy of the auto scaling group. | <pre>object({<br>    on_demand_base_capacity = number # absolute minimum amount of on-demand instances<br>    on_demand_percentage_above_base_capacity = number # percentage split between on-demand and spot instances<br>    spot_allocation_strategy = string<br>  })</pre> | <pre>{<br>  "on_demand_base_capacity": 0,<br>  "on_demand_percentage_above_base_capacity": 0,<br>  "spot_allocation_strategy": "lowest-price"<br>}</pre> | no |
 | [kms\_key\_arn](#input\_kms\_key\_arn) | The ARN of the KMS key used to encrypt the resources. | `string` | `null` | no |
 | [resource\_names](#input\_resource\_names) | Settings for generating resource names. Set the prefix and the separator according to your company style guide. | <pre>object({<br>    prefix = string<br>    separator = string<br>  })</pre> | <pre>{<br>  "prefix": "bastion",<br>  "separator": "-"<br>}</pre> | no |
 | [schedule](#input\_schedule) | Defines when to start and stop the instances. Use 'start' and 'stop' with a cron expression and add the 'time\_zone'. | <pre>object({<br>    start = string<br>    stop = string<br>    time_zone = string<br>  })</pre> | `null` | no |
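+
+A minimal configuration with a weekday schedule could look like the following sketch (the module source and all
+values are illustrative and must be adapted to your setup):
+
+```hcl
+module "bastion_host" {
+  source = "<path or registry address of this module>"
+
+  iam_user_arns         = ["arn:aws:iam::123456789012:user/developer"]
+  egress_open_tcp_ports = [5432]
+
+  schedule = {
+    start     = "0 9 * * 1-5"
+    stop      = "0 17 * * 1-5"
+    time_zone = "Europe/Berlin"
+  }
+}
+```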
diff --git a/autoscaling.tf b/autoscaling.tf
index 6e43ce7..0d866ff 100644
--- a/autoscaling.tf
+++ b/autoscaling.tf
@@ -82,8 +82,8 @@ resource "aws_autoscaling_group" "on_spot" {
   }
 }
 
-resource "aws_autoscaling_schedule" "on_demand_up" {
-  count = var.schedule != null && !var.instance.enable_spot ? 1 : 0
+resource "aws_autoscaling_schedule" "up" {
+  count = var.schedule != null ? 1 : 0
 
   scheduled_action_name = "${local.resource_prefix_with_separator}start"
   recurrence            = var.schedule["start"]
@@ -92,10 +92,10 @@ resource "aws_autoscaling_schedule" "on_demand_up" {
   time_zone             = var.schedule["time_zone"]
 
   min_size               = 1
   max_size               = var.instance.desired_capacity
   desired_capacity       = var.instance.desired_capacity
-  autoscaling_group_name = aws_autoscaling_group.on_demand[0].name
+  autoscaling_group_name = local.auto_scaling_group.name
 }
 
-resource "aws_autoscaling_schedule" "on_demand_down" {
-  count = var.schedule != null && !var.instance.enable_spot ? 1 : 0
+resource "aws_autoscaling_schedule" "down" {
+  count = var.schedule != null ? 1 : 0
 
   scheduled_action_name = "${local.resource_prefix_with_separator}stop"
@@ -105,31 +105,5 @@ resource "aws_autoscaling_schedule" "on_demand_down" {
   recurrence            = var.schedule["stop"]
   time_zone             = var.schedule["time_zone"]
 
   min_size               = 0
   max_size               = 0
   desired_capacity       = 0
-  autoscaling_group_name = aws_autoscaling_group.on_demand[0].name
-}
-
-resource "aws_autoscaling_schedule" "on_spot_up" {
-  count = var.schedule != null && var.instance.enable_spot ? 1 : 0
-
-  scheduled_action_name = "${local.resource_prefix_with_separator}start"
-  recurrence            = var.schedule["start"]
-  time_zone             = var.schedule["time_zone"]
-
-  min_size               = 1
-  max_size               = var.instance.desired_capacity
-  desired_capacity       = var.instance.desired_capacity
-  autoscaling_group_name = aws_autoscaling_group.on_spot[0].name
-}
-
-resource "aws_autoscaling_schedule" "on_spot_down" {
-  count = var.schedule != null && var.instance.enable_spot ? 1 : 0
-
-  scheduled_action_name = "${local.resource_prefix_with_separator}stop"
-  recurrence            = var.schedule["stop"]
-  time_zone             = var.schedule["time_zone"]
-
-  min_size               = 0
-  max_size               = 0
-  desired_capacity       = 0
-  autoscaling_group_name = aws_autoscaling_group.on_spot[0].name
+  autoscaling_group_name = local.auto_scaling_group.name
 }
diff --git a/lambda/panic_button_switch_off.py b/lambda/panic_button_switch_off.py
new file mode 100644
index 0000000..79470fa
--- /dev/null
+++ b/lambda/panic_button_switch_off.py
@@ -0,0 +1,51 @@
+import logging
+import os
+
+import boto3
+from botocore.exceptions import ClientError
+
+logger = logging.getLogger(__name__)
+logger.setLevel(os.environ.get('LOG_LEVEL', 'info').upper())
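+
+# Expected environment variables (set by the Terraform module):
+#   AUTO_SCALING_GROUP_NAME - name of the bastion auto scaling group
+#   BASTION_HOST_NAME       - value of the 'Name' tag of the bastion instances
+#   LOG_LEVEL               - optional log level, defaults to 'info'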
+
+
+def handler(event, context):
+    # change the ASG to disable automatic restart
+    disable_asg(os.environ['AUTO_SCALING_GROUP_NAME'])
+
+    # find the running EC2 instances and stop them
+    kill_running_bastion_hosts(os.environ['BASTION_HOST_NAME'])
+
+    logger.info("Bastion host(s) switched off")
+
+
+def disable_asg(autoscaling_group_name):
+    asg = boto3.client('autoscaling')
+
+    try:
+        asg.update_auto_scaling_group(AutoScalingGroupName=autoscaling_group_name, MinSize=0, MaxSize=0,
+                                      DesiredCapacity=0)
+    except ClientError as e:
+        logger.error('Failed to update the ASG %s', autoscaling_group_name, exc_info=e)
+
+        raise
+
+
+def kill_running_bastion_hosts(name):
+    ec2 = boto3.client('ec2')
+
+    try:
+        instances = ec2.describe_instances(Filters=[{'Name': 'tag:Name', 'Values': [name]},
+                                                    {'Name': 'instance-state-name', 'Values': ['pending', 'running']}])
+
+        if 'Reservations' in instances:
+            instance_ids = []
+
+            for r in instances['Reservations']:
+                for i in r['Instances']:
+                    instance_ids.append(i['InstanceId'])
+
+            if instance_ids:
+                ec2.stop_instances(InstanceIds=instance_ids)
+
+                logger.info("Bastion killed: %s", instance_ids)
+    except ClientError as e:
+        logger.error('Failed to kill the bastion EC2 instance(s): %s', name, exc_info=e)
+
+        raise
diff --git a/lambda/panic_button_switch_on.py b/lambda/panic_button_switch_on.py
new file mode 100644
index 0000000..8266527
--- /dev/null
+++ b/lambda/panic_button_switch_on.py
@@ -0,0 +1,37 @@
+import logging
+import os
+
+import boto3
+from botocore.exceptions import ClientError
+
+logger = logging.getLogger(__name__)
+logger.setLevel(os.environ.get('LOG_LEVEL', 'info').upper())
+
+
+def handler(event, context):
+    asg = boto3.client('autoscaling')
+
+    auto_scaling_group_name = os.environ['AUTO_SCALING_GROUP_NAME']
+
+    try:
+        # restore min/max/desired capacity
+        asg.update_auto_scaling_group(AutoScalingGroupName=auto_scaling_group_name,
+                                      MinSize=int(os.environ['AUTO_SCALING_GROUP_MIN_SIZE']),
+                                      MaxSize=int(os.environ['AUTO_SCALING_GROUP_MAX_SIZE']),
+                                      DesiredCapacity=int(os.environ['AUTO_SCALING_GROUP_DESIRED_CAPACITY']))
+
+        # remove all schedules so the bastion is not shut down again right away
+        response = asg.describe_scheduled_actions(AutoScalingGroupName=auto_scaling_group_name)
+
+        schedule_names = []
+
+        for schedule in response['ScheduledUpdateGroupActions']:
+            schedule_names.append(schedule['ScheduledActionName'])
+
+        if schedule_names:
+            asg.batch_delete_scheduled_action(AutoScalingGroupName=auto_scaling_group_name,
+                                              ScheduledActionNames=schedule_names)
+    except ClientError as e:
+        logger.error('Failed to update the ASG %s', auto_scaling_group_name, exc_info=e)
+
+        raise
+
+    logger.info("Bastion host(s) switched on")
diff --git a/locals.tf b/locals.tf
index a40c929..b0cef9f 100644
--- a/locals.tf
+++ b/locals.tf
@@ -14,4 +14,14 @@ locals {
   bastion_access_tag_name = "bastion-access"
 
   bastion_instance_profile_name = var.instance["profile_name"] != "" ? var.instance["profile_name"] : module.instance_profile_role[0].iam_role_name
+
+  panic_button_switch_off_lambda_source_file_name = "panic_button_switch_off.py"
+  panic_button_switch_off_lambda_source           = "${path.module}/lambda/${local.panic_button_switch_off_lambda_source_file_name}"
+  panic_button_switch_off_lambda_name             = "${var.resource_names.prefix}${var.resource_names.separator}panic-button-off"
+
+  panic_button_switch_on_lambda_source_file_name = "panic_button_switch_on.py"
+  panic_button_switch_on_lambda_source           = "${path.module}/lambda/${local.panic_button_switch_on_lambda_source_file_name}"
+  panic_button_switch_on_lambda_name             = "${var.resource_names.prefix}${var.resource_names.separator}panic-button-on"
+
+  auto_scaling_group = var.instance.enable_spot ? aws_autoscaling_group.on_spot[0] : aws_autoscaling_group.on_demand[0]
 }
diff --git a/panic-button-off.tf b/panic-button-off.tf
new file mode 100644
index 0000000..84396ed
--- /dev/null
+++ b/panic-button-off.tf
@@ -0,0 +1,126 @@
+resource "aws_iam_role" "panic_button_off_execution" {
+  name                  = "${var.resource_names.prefix}${var.resource_names.separator}panic-button-off"
+  description           = "Role for executing the bastion panic button switch off"
+  assume_role_policy    = data.aws_iam_policy_document.panic_button_off_assume_role.json
+  force_detach_policies = true
+
+  tags = var.tags
+}
+
+data "aws_iam_policy_document" "panic_button_off_assume_role" {
+  statement {
+    actions = [
+      "sts:AssumeRole",
+    ]
+    effect = "Allow"
+
+    principals {
+      identifiers = ["lambda.amazonaws.com"]
+      type        = "Service"
+    }
+  }
+}
+
+data "aws_iam_policy_document" "panic_button_off" {
+  statement {
+    sid = "ListInstances"
+    actions = [
+      "ec2:DescribeInstances"
+    ]
+    resources = ["*"]
+    effect    = "Allow"
+  }
+
+  statement {
+    sid = "KillBastionHosts"
+    actions = [
+      "ec2:StopInstances"
+    ]
+    # we do not know the instances as they are created dynamically, but the condition below allows valid ones only
+    # tfsec:ignore:aws-iam-no-policy-wildcards
+    resources = ["*"]
+    condition {
+      test     = "StringEquals"
+      values   = [local.bastion_host_name]
+      variable = "aws:ResourceTag/Name"
+    }
+    effect = "Allow"
+  }
+
+  statement {
+    sid       = "UpdateASG"
+    actions   = ["autoscaling:UpdateAutoScalingGroup"]
+    resources = [local.auto_scaling_group.arn]
+    effect    = "Allow"
+  }
+}
+
+resource "aws_iam_policy" "panic_button_off" {
+  name   = "${var.resource_names.prefix}${var.resource_names.separator}switch-off"
+  policy = data.aws_iam_policy_document.panic_button_off.json
+
+  tags = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "panic_button_off" {
+  role       = aws_iam_role.panic_button_off_execution.name
+  policy_arn = aws_iam_policy.panic_button_off.arn
+}
+
+resource "aws_iam_role_policy_attachment" "panic_button_off_basic_execution" {
+  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
+  role       = aws_iam_role.panic_button_off_execution.id
+}
+
+resource "aws_iam_role_policy_attachment" "panic_button_off_x_ray" {
+  policy_arn = "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess"
+  role       = aws_iam_role.panic_button_off_execution.id
+}
+
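+# Zip the single-file Lambda source. The archive ends up in builds/ (git-ignored above); whenever the source
+# changes, the hash changes and the function code below is updated.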
+data "archive_file" "panic_button_off_package" {
+  type        = "zip"
+  source_file = local.panic_button_switch_off_lambda_source
+  output_path = "${path.root}/builds/${local.panic_button_switch_off_lambda_source_file_name}.zip"
+}
+
+resource "aws_lambda_function" "panic_button_off" {
+  architectures    = ["arm64"]
+  description      = "Switches off all bastion hosts immediately"
+  filename         = data.archive_file.panic_button_off_package.output_path
+  source_code_hash = data.archive_file.panic_button_off_package.output_base64sha256
+  function_name    = local.panic_button_switch_off_lambda_name
+  handler          = "panic_button_switch_off.handler"
+  timeout          = 30
+  memory_size      = 256
+  package_type     = "Zip"
+  publish          = true
+  role             = aws_iam_role.panic_button_off_execution.arn
+  runtime          = "python3.9"
+
+  environment {
+    variables = {
+      AUTO_SCALING_GROUP_NAME = local.auto_scaling_group.name
+      BASTION_HOST_NAME       = local.bastion_host_name
+
+      LOG_LEVEL = "info"
+    }
+  }
+
+  tracing_config {
+    mode = "Active"
+  }
+
+  tags = var.tags
+
+  # otherwise the Lambda auto-creates the group, which conflicts with Terraform
+  depends_on = [aws_cloudwatch_log_group.panic_button_off]
+}
+
+resource "aws_cloudwatch_log_group" "panic_button_off" {
+  name              = "/aws/lambda/${local.panic_button_switch_off_lambda_name}"
+  retention_in_days = 3
+
+  kms_key_id = var.kms_key_arn
+
+  tags = var.tags
+}
diff --git a/panic-button-on.tf b/panic-button-on.tf
new file mode 100644
index 0000000..f2fd173
--- /dev/null
+++ b/panic-button-on.tf
@@ -0,0 +1,110 @@
+resource "aws_iam_role" "panic_button_on_execution" {
+  name                  = "${var.resource_names.prefix}${var.resource_names.separator}panic-button-on"
+  description           = "Role for executing the bastion panic button switch on"
+  assume_role_policy    = data.aws_iam_policy_document.panic_button_on_assume_role.json
+  force_detach_policies = true
+
+  tags = var.tags
+}
+
+data "aws_iam_policy_document" "panic_button_on_assume_role" {
+  statement {
+    actions = [
+      "sts:AssumeRole",
+    ]
+    effect = "Allow"
+
+    principals {
+      identifiers = ["lambda.amazonaws.com"]
+      type        = "Service"
+    }
+  }
+}
+
+data "aws_iam_policy_document" "panic_button_on" {
+  statement {
+    sid       = "UpdateASG"
+    actions   = ["autoscaling:UpdateAutoScalingGroup", "autoscaling:DeleteScheduledAction", "autoscaling:BatchDeleteScheduledAction"]
+    resources = [local.auto_scaling_group.arn]
+    effect    = "Allow"
+  }
+
+  statement {
+    sid       = "DescribeASG"
+    actions   = ["autoscaling:DescribeScheduledActions"]
+    resources = ["*"]
+    effect    = "Allow"
+  }
+}
+
+resource "aws_iam_policy" "panic_button_on" {
+  name   = "${var.resource_names.prefix}${var.resource_names.separator}switch-on"
+  policy = data.aws_iam_policy_document.panic_button_on.json
+
+  tags = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "panic_button_on" {
+  role       = aws_iam_role.panic_button_on_execution.name
+  policy_arn = aws_iam_policy.panic_button_on.arn
+}
+
+resource "aws_iam_role_policy_attachment" "panic_button_on_basic_execution" {
+  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
+  role       = aws_iam_role.panic_button_on_execution.id
+}
+
+resource "aws_iam_role_policy_attachment" "panic_button_on_x_ray" {
+  policy_arn = "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess"
+  role       = aws_iam_role.panic_button_on_execution.id
+}
+
+data "archive_file" "panic_button_on_package" {
+  type        = "zip"
+  source_file = local.panic_button_switch_on_lambda_source
+  output_path = "${path.root}/builds/${local.panic_button_switch_on_lambda_source_file_name}.zip"
+}
+
+resource "aws_lambda_function" "panic_button_on" {
+  architectures    = ["arm64"]
+  description      = "Starts all bastion hosts immediately"
+  filename         = data.archive_file.panic_button_on_package.output_path
+  source_code_hash = data.archive_file.panic_button_on_package.output_base64sha256
+  function_name    = local.panic_button_switch_on_lambda_name
+  handler          = "panic_button_switch_on.handler"
+  timeout          = 30
+  memory_size      = 256
+  package_type     = "Zip"
+  publish          = true
+  role             = aws_iam_role.panic_button_on_execution.arn
+  runtime          = "python3.9"
+
+  environment {
+    variables = {
+      AUTO_SCALING_GROUP_NAME             = local.auto_scaling_group.name
+      AUTO_SCALING_GROUP_MIN_SIZE         = local.auto_scaling_group.min_size
+      AUTO_SCALING_GROUP_MAX_SIZE         = local.auto_scaling_group.max_size
+      AUTO_SCALING_GROUP_DESIRED_CAPACITY = local.auto_scaling_group.desired_capacity
+
+      LOG_LEVEL = "info"
+    }
+  }
+
+  tracing_config {
+    mode = "Active"
+  }
+
+  tags = var.tags
+
+  # otherwise the Lambda auto-creates the group, which conflicts with Terraform
+  depends_on = [aws_cloudwatch_log_group.panic_button_on]
+}
+
+resource "aws_cloudwatch_log_group" "panic_button_on" {
+  name              = "/aws/lambda/${local.panic_button_switch_on_lambda_name}"
+  retention_in_days = 3
+
+  kms_key_id = var.kms_key_arn
+
+  tags = var.tags
+}
diff --git a/provider.tf b/provider.tf
index 8d06536..0580dc8 100644
--- a/provider.tf
+++ b/provider.tf
@@ -1,5 +1,10 @@
 terraform {
   required_providers {
+    archive = {
+      source  = "hashicorp/archive"
+      version = ">= 2.0.0"
+    }
+
     aws = {
       source  = "hashicorp/aws"
       version = ">= 4.0.0"