enha: added option to create glacier vault instead (#2)
kfc-manager committed Apr 1, 2024
1 parent 8e506cc commit 5a31e4a
Showing 6 changed files with 152 additions and 24 deletions.
16 changes: 10 additions & 6 deletions README.md
@@ -22,12 +22,13 @@ This module provides an S3 bucket and multiple SQS queues which receive an event,

## Inputs

| Name | Description | Type | Default | Required |
| ------------- | ----------------------------------------------------------------------------------------------------- | -------------- | ------- | :------: |
| identifier | Unique identifier to differentiate global resources. | `string` | n/a | yes |
| force_destroy | A flag for wether or not being able to destroy a non empty bucket. | `bool` | true | no |
| queues | A list of object to define SQS queues. | `list(object)` | [] | no |
| tags | A map of tags to add to all resources. Name is always set as tag and the other tags will be appended. | `map(string)` | {} | no |
| Name          | Description                                                                                                     | Type           | Default   | Required |
| ------------- | --------------------------------------------------------------------------------------------------------------- | -------------- | --------- | :------: |
| identifier    | Unique identifier to differentiate global resources.                                                             | `string`       | n/a       | yes      |
| force_destroy | Whether a non-empty bucket can be destroyed.                                                                      | `bool`         | true      | no       |
| storage_class | Storage class of the S3 bucket. Set to 'GLACIER' to provision a Glacier vault instead (see the example below).   | `string`       | "DEFAULT" | no       |
| queues        | A list of objects defining SQS queues.                                                                            | `list(object)` | []        | no       |
| tags          | A map of tags to add to all resources. Name is always set as a tag and the other tags are appended.              | `map(string)`  | {}        | no       |
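
Setting `storage_class` to `"GLACIER"` provisions a Glacier vault (plus an SNS topic for retrieval notifications) instead of an S3 bucket. A minimal sketch of such a call, with a placeholder source path, identifier, and tags:

```hcl
module "archive" {
  source = "./modules/bucket" # placeholder: point this at the module

  identifier    = "example-archive-dev"
  storage_class = "GLACIER"

  tags = {
    Environment = "dev"
  }
}
```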

### `queues`

@@ -44,6 +45,7 @@ This module provides an S3 bucket and multiple SQS queues which receive an event,
| ------ | ---------------------------------------------------- |
| id | The ID of the S3 bucket. |
| arn | The ARN of the S3 bucket. |
| uri | The URI of the S3 bucket. |
| queues | List of objects with data of the created SQS queues. |

### `queues`
@@ -61,6 +63,8 @@ module "bucket" {
identifier = "example-bucket-dev"
force_destroy = true
storage_class = "DEFAULT"
queues = [
{
identifier = "example-bucket-queue-one-dev"
53 changes: 40 additions & 13 deletions main.tf
@@ -1,30 +1,57 @@
################################
# S3 Bucket #
################################

# random suffix to keep names globally unique while staying within the 63-character S3 bucket name limit
resource "random_string" "suffix" {
length = 63 - length(var.identifier) - 1
special = false
upper = false
}

################################
# Glacier Vault #
################################

resource "aws_sns_topic" "glacier" {
count = var.storage_class == "GLACIER" ? 1 : 0
name = "${var.identifier}-glacier"

tags = var.tags
}

resource "aws_glacier_vault" "main" {
count = var.storage_class == "GLACIER" ? 1 : 0
name = "${var.identifier}-${random_string.suffix.result}"

notification {
sns_topic = aws_sns_topic.glacier[0].arn
events = ["ArchiveRetrievalCompleted", "InventoryRetrievalCompleted"]
}

tags = var.tags
}

################################
# S3 Bucket #
################################

resource "aws_s3_bucket" "main" {
count = var.storage_class == "DEFAULT" ? 1 : 0
bucket = "${var.identifier}-${random_string.suffix.result}"
force_destroy = var.force_destroy

tags = var.tags
}

resource "aws_s3_bucket_ownership_controls" "main" {
bucket = aws_s3_bucket.main.id
count = var.storage_class == "DEFAULT" ? 1 : 0
bucket = aws_s3_bucket.main[0].id

rule {
object_ownership = "BucketOwnerPreferred"
}
}

resource "aws_s3_bucket_public_access_block" "main" {
bucket = aws_s3_bucket.main.id
count = var.storage_class == "DEFAULT" ? 1 : 0
bucket = aws_s3_bucket.main[0].id

block_public_acls = true
block_public_policy = true
@@ -54,12 +81,12 @@ data "aws_iam_policy_document" "topic" {
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [aws_s3_bucket.main.arn]
values = [aws_s3_bucket.main[0].arn]
}
}
}

resource "aws_sns_topic" "main" {
resource "aws_sns_topic" "fanout" {
count = length(var.queues) > 1 ? 1 : 0
name = "${var.identifier}-sqs-fanout"
policy = data.aws_iam_policy_document.topic[0].json
@@ -85,17 +112,17 @@ data "aws_iam_policy_document" "fanout" {
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [aws_sns_topic.main[0].arn]
values = [aws_sns_topic.fanout[0].arn]
}
}
}

resource "aws_s3_bucket_notification" "topic" {
count = length(var.queues) > 1 ? 1 : 0
bucket = aws_s3_bucket.main.id
bucket = aws_s3_bucket.main[0].id

topic {
topic_arn = aws_sns_topic.main[0].arn
topic_arn = aws_sns_topic.fanout[0].arn
events = ["s3:ObjectCreated:*"]
}
}
@@ -123,7 +150,7 @@ data "aws_iam_policy_document" "queue" {
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [aws_s3_bucket.main.arn]
values = [aws_s3_bucket.main[0].arn]
}
}
}
@@ -162,7 +189,7 @@ resource "aws_sqs_queue_redrive_allow_policy" "main" {

resource "aws_s3_bucket_notification" "queue" {
count = length(var.queues) == 1 ? 1 : 0
bucket = aws_s3_bucket.main.id
bucket = aws_s3_bucket.main[0].id

queue {
queue_arn = aws_sqs_queue.main[0].arn
12 changes: 10 additions & 2 deletions outputs.tf
@@ -1,11 +1,19 @@
output "id" {
description = "The ID of the S3 bucket."
value = try(aws_s3_bucket.main.id, null)
value = "${var.identifier}-${random_string.suffix.result}"
}

output "arn" {
description = "The ARN of the S3 bucket."
value = try(aws_s3_bucket.main.arn, null)
value = var.storage_class == "DEFAULT" ? try(aws_s3_bucket.main[0].arn, null) : (
try(aws_glacier_vault.main[0].arn, null))
}

output "uri" {
description = "The URI of the S3 bucket."
value = var.storage_class == "DEFAULT" ? (
try(aws_s3_bucket.main[0].bucket_regional_domain_name, null)) : (
try(aws_glacier_vault.main[0].location, null))
}

output "queues" {
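
The new `uri` output resolves to the bucket's regional domain name for `DEFAULT` and to the vault location for `GLACIER`, so callers can consume it without branching. A hedged sketch of forwarding these outputs (the `module "bucket"` name follows the README usage example):

```hcl
output "bucket_id" {
  description = "Name of the bucket or vault created by the module."
  value       = module.bucket.id
}

output "bucket_uri" {
  # Regional domain name for DEFAULT, vault location for GLACIER.
  description = "URI of the bucket or vault created by the module."
  value       = module.bucket.uri
}
```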
79 changes: 79 additions & 0 deletions tests/bucket.tftest.hcl
@@ -24,3 +24,82 @@ run "valid_identifier" {
identifier = "abc"
}
}

run "invalid_storage_class" {
command = plan

variables {
identifier = "abc"
storage_class = "TEST"
}

expect_failures = [var.storage_class]
}

run "default_bucket" {
command = plan

variables {
identifier = "abc"
storage_class = "DEFAULT"
}

assert {
condition = length(aws_s3_bucket.main) == 1
error_message = "S3 bucket was not created"
}

assert {
condition = length(aws_s3_bucket_ownership_controls.main) == 1
error_message = "S3 bucket ownership controlas were not created"
}

assert {
condition = length(aws_s3_bucket_public_access_block.main) == 1
error_message = "S3 bucket public access block was not created"
}

assert {
condition = length(aws_glacier_vault.main) == 0
error_message = "Glacier vault was created unexpectedly"
}

assert {
condition = length(aws_sns_topic.glacier) == 0
error_message = "SNS topic of glacier vault was created unexpectedly"
}
}

run "glacier_bucket" {
command = plan

variables {
identifier = "abc"
storage_class = "GLACIER"
}

assert {
condition = length(aws_s3_bucket.main) == 0
error_message = "S3 bucket was created unexpectedly"
}

assert {
condition = length(aws_s3_bucket_ownership_controls.main) == 0
error_message = "S3 bucket ownership controlas were created unexpectedly"
}

assert {
condition = length(aws_s3_bucket_public_access_block.main) == 0
error_message = "S3 bucket public access block was not created unexpectedly"
}

assert {
condition = length(aws_glacier_vault.main) == 1
error_message = "Glacier vault was not created"
}

assert {
condition = length(aws_sns_topic.glacier) == 1
error_message = "SNS topic of glacier vault was not created"
}
}
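
A further check could assert that the vault's notification block is wired up when `GLACIER` is selected. A sketch, under the assumption that the count of the statically configured block is known at plan time:

```hcl
run "glacier_notification" {
  command = plan

  variables {
    identifier    = "abc"
    storage_class = "GLACIER"
  }

  assert {
    condition     = length(aws_glacier_vault.main[0].notification) == 1
    error_message = "Glacier vault notification block was not configured"
  }
}
```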
6 changes: 3 additions & 3 deletions tests/queue.tftest.hcl
@@ -106,7 +106,7 @@ run "no_queue" {
}

assert {
condition = length(aws_sns_topic.main) == 0
condition = length(aws_sns_topic.fanout) == 0
error_message = "SNS topic was created unexpectedly"
}

@@ -162,7 +162,7 @@ run "single_queue" {
}

assert {
condition = length(aws_sns_topic.main) == 0
condition = length(aws_sns_topic.fanout) == 0
error_message = "SNS topic was created unexpectedly"
}

@@ -230,7 +230,7 @@ run "multiple_queues" {
}

assert {
condition = length(aws_sns_topic.main) == 1
condition = length(aws_sns_topic.fanout) == 1
error_message = "SNS topic was not created"
}

10 changes: 10 additions & 0 deletions variables.tf
@@ -13,6 +13,16 @@ variable "force_destroy" {
default = true
}

variable "storage_class" {
description = "Storage class of the S3 bucket. For example 'GLACIER' for a deep archive bucket."
type = string
default = "DEFAULT"
validation {
condition = var.storage_class == "DEFAULT" || var.storage_class == "GLACIER"
error_message = "Storage class must be either 'DEFAULT' or 'GLACIER'"
}
}
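
An equivalent validation could use `contains`, which keeps the condition short if more classes are supported later. A sketch of the same variable written that way:

```hcl
variable "storage_class" {
  description = "Storage class of the S3 bucket. Set to 'GLACIER' to provision a Glacier vault instead."
  type        = string
  default     = "DEFAULT"

  validation {
    condition     = contains(["DEFAULT", "GLACIER"], var.storage_class)
    error_message = "Storage class must be either 'DEFAULT' or 'GLACIER'."
  }
}
```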

variable "queues" {
description = "A list of object to define SQS queues."
type = list(object({
