feat(s3): add skip destination validation property (#30916)
### Issue #30914

Closes #30914.

### Reason for this change

When customers set up an S3 notification configuration for SQS/SNS/Lambda destinations (via the PutBucketNotificationConfiguration API), S3 sends an s3:TestEvent message to validate permissions. (For Lambda, it performs a dry-run function invocation instead.)

However, some customers do not want S3 to send these test events or validate permissions during CDK deployment.

Internal reference: `49359101-0e5e-43f3-99eb-3c6c5ed68db1`

For example, one customer does not want these test events because they have an alarm on unconsumed messages in SQS and no SQS consumers. They also update the notification configuration frequently, which leads to many test events piling up in the queue. See internal ticket: `P142186522`.

### Description of changes

Expose a skip-destination-validation property in Bucket props and pass it through to the PutBucketNotificationConfiguration API call.
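
For illustration, here is a minimal usage sketch using the CDK Python bindings. The prop name `notifications_skip_destination_validation` is an assumption for this sketch and may differ from the name actually shipped:

```python
# Minimal usage sketch (CDK Python bindings). The Bucket prop name
# `notifications_skip_destination_validation` is an assumption and may
# differ from the name in the released construct library.
import aws_cdk as cdk
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_s3_notifications as s3n
from aws_cdk import aws_sqs as sqs

app = cdk.App()
stack = cdk.Stack(app, "SkipValidationStack")

queue = sqs.Queue(stack, "Queue")

bucket = s3.Bucket(
    stack,
    "Bucket",
    # Ask the notifications custom resource to skip S3's destination
    # validation (the s3:TestEvent / Lambda dry-run described above).
    notifications_skip_destination_validation=True,
)
bucket.add_event_notification(s3.EventType.OBJECT_CREATED, s3n.SqsDestination(queue))

app.synth()
```

With the flag set, the `Custom::S3BucketNotifications` handler passes `SkipDestinationValidation` to S3, so no s3:TestEvent is delivered to the queue.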

### Description of how you validated changes

Unit test updated.

Updated integration tests. Note that I had to fix two integration tests and run them with the `--disable-update-workflow` flag because they were failing:
 - `integ.s3.imported-bucket.js` failed because someone had already created a bucket named `cdk-integration-test-s3-imported-bucket-name`
 - `integ.bucket-notifications.js` failed with an overlapping-suffix error (not sure how it was passing previously):
 
 ```
❌  cdk-integ-lambda-bucket-s3-notifications failed: Error: The stack named cdk-integ-lambda-bucket-s3-notifications failed to deploy: UPDATE_FAILED 
(The following resource(s) failed to update: [Construct1IntegUnmanagedBucket1Notifications4A1599D7]. ): Received response status [FAILED] from custom resource. 
Message returned: Error: An error occurred (InvalidArgument) when calling the PutBucketNotificationConfiguration operation: Configuration is ambiguously defined. 
Cannot have overlapping suffixes in two rules if the prefixes are overlapping for the same event type.. 
See the details in CloudWatch Log Stream: 2024/07/22/[$LATEST]e6a16cf979dd4671998e7d911769ff42 (RequestId: 19f6fcd7-d31d-4fbf-9f4a-e3b7cba1cd2b), 
Rolling back the failed resource only., Received response status [FAILED] from custom resource. Message returned: Error: An error occurred 
(InvalidArgument) when calling the PutBucketNotificationConfiguration operation: Configuration is ambiguously defined. 
Cannot have overlapping suffixes in two rules if the prefixes are overlapping for the same event type.. 
See the details in CloudWatch Log Stream: 2024/07/22/[$LATEST]c30efd0375d64b8088e0ee64d63ce4db (RequestId: 19f6fcd7-d31d-4fbf-9f4a-e3b7cba1cd2b)
```

### Checklist
- [X] My code adheres to the [CONTRIBUTING GUIDE](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md) and [DESIGN GUIDELINES](https://github.com/aws/aws-cdk/blob/main/docs/DESIGN_GUIDELINES.md)

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
yerzhan7 committed Aug 19, 2024
1 parent c1c800e commit ffc40e5
Showing 53 changed files with 2,231 additions and 2,806 deletions.

@@ -2,11 +2,18 @@
"Resources": {
"bucket43879C71": {
"Type": "AWS::S3::Bucket",
- "Properties": {
- "BucketName": "cdk-integration-test-s3-imported-bucket-name"
- },
- "UpdateReplacePolicy": "Retain",
- "DeletionPolicy": "Retain"
+ "UpdateReplacePolicy": "Delete",
+ "DeletionPolicy": "Delete"
}
},
+ "Outputs": {
+ "ExportsOutputRefbucket43879C716CF1CFA3": {
+ "Value": {
+ "Ref": "bucket43879C71"
+ },
+ "Export": {
+ "Name": "TestStack1:ExportsOutputRefbucket43879C716CF1CFA3"
+ }
+ }
+ },
"Parameters": {

@@ -1,5 +1,55 @@
{
"Resources": {
"FServiceRole3AC82EE1": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}
],
"Version": "2012-10-17"
},
"ManagedPolicyArns": [
{
"Fn::Join": [
"",
[
"arn:",
{
"Ref": "AWS::Partition"
},
":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
]
]
}
]
}
},
"FC4345940": {
"Type": "AWS::Lambda::Function",
"Properties": {
"Code": {
"ZipFile": "exports.handler = async function handler(event) {\n console.log('event:', JSON.stringify(event, undefined, 2));\n return { event };\n}"
},
"Handler": "index.handler",
"Role": {
"Fn::GetAtt": [
"FServiceRole3AC82EE1",
"Arn"
]
},
"Runtime": "nodejs18.x"
},
"DependsOn": [
"FServiceRole3AC82EE1"
]
},
"ImportedNotificationsDB5DE386": {
"Type": "Custom::S3BucketNotifications",
"Properties": {
@@ -9,7 +59,9 @@
"Arn"
]
},
"BucketName": "cdk-integration-test-s3-imported-bucket-name",
"BucketName": {
"Fn::ImportValue": "TestStack1:ExportsOutputRefbucket43879C716CF1CFA3"
},
"NotificationConfiguration": {
"LambdaFunctionConfigurations": [
{
@@ -25,13 +77,14 @@
}
]
},
"Managed": false
"Managed": false,
"SkipDestinationValidation": false
},
"DependsOn": [
"ImportedAllowBucketNotificationsToTestStackF6B9A922242C13EDE"
"ImportedAllowBucketNotificationsToTestStack2F56424633CA7CA6E4"
]
},
"ImportedAllowBucketNotificationsToTestStackF6B9A922242C13EDE": {
"ImportedAllowBucketNotificationsToTestStack2F56424633CA7CA6E4": {
"Type": "AWS::Lambda::Permission",
"Properties": {
"Action": "lambda:InvokeFunction",
@@ -53,62 +106,15 @@
{
"Ref": "AWS::Partition"
},
- ":s3:::cdk-integration-test-s3-imported-bucket-name"
+ ":s3:::",
+ {
+ "Fn::ImportValue": "TestStack1:ExportsOutputRefbucket43879C716CF1CFA3"
+ }
]
]
}
}
},
"FServiceRole3AC82EE1": {
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}
],
"Version": "2012-10-17"
},
"ManagedPolicyArns": [
{
"Fn::Join": [
"",
[
"arn:",
{
"Ref": "AWS::Partition"
},
":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
]
]
}
]
}
},
"FC4345940": {
"Type": "AWS::Lambda::Function",
"Properties": {
"Code": {
"ZipFile": "exports.handler = async function handler(event) {\n console.log('event:', JSON.stringify(event, undefined, 2));\n return { event };\n}"
},
"Handler": "index.handler",
"Role": {
"Fn::GetAtt": [
"FServiceRole3AC82EE1",
"Arn"
]
},
"Runtime": "nodejs18.x"
},
"DependsOn": [
"FServiceRole3AC82EE1"
]
},
"BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC": {
"Type": "AWS::IAM::Role",
"Properties": {
@@ -169,7 +175,7 @@
"Properties": {
"Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)",
"Code": {
"ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n old = event.get(\"OldResourceProperties\", {}).get(\"NotificationConfiguration\", {})\n if managed:\n config = handle_managed(event[\"RequestType\"], notification_configuration)\n else:\n config = handle_unmanaged(props[\"BucketName\"], stack_id, event[\"RequestType\"], notification_configuration, old)\n s3.put_bucket_notification_configuration(Bucket=props[\"BucketName\"], NotificationConfiguration=config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration, old):\n def get_id(n):\n n['Id'] = ''\n strToHash=json.dumps(n, sort_keys=True).replace('\"Name\": \"prefix\"', '\"Name\": \"Prefix\"').replace('\"Name\": \"suffix\"', '\"Name\": \"Suffix\"')\n return f\"{stack_id}-{hash(strToHash)}\"\n def with_id(n):\n n['Id'] = get_id(n)\n return n\n\n external_notifications = {}\n existing_notifications = s3.get_bucket_notification_configuration(Bucket=bucket)\n for t in CONFIGURATION_TYPES:\n if request_type == 'Update':\n old_incoming_ids = [get_id(n) for n in old.get(t, [])]\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not get_id(n) in old_incoming_ids] \n elif request_type == 'Delete':\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n elif request_type == 'Create':\n external_notifications[t] = [n for n in existing_notifications.get(t, [])]\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n if request_type == 'Delete':\n return external_notifications\n\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n 
\"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))"
"ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n skipDestinationValidation = props.get('SkipDestinationValidation', 'false').lower() == 'true'\n stack_id = event['StackId']\n old = event.get(\"OldResourceProperties\", {}).get(\"NotificationConfiguration\", {})\n if managed:\n config = handle_managed(event[\"RequestType\"], notification_configuration)\n else:\n config = handle_unmanaged(props[\"BucketName\"], stack_id, event[\"RequestType\"], notification_configuration, old)\n s3.put_bucket_notification_configuration(Bucket=props[\"BucketName\"], NotificationConfiguration=config, SkipDestinationValidation=skipDestinationValidation)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration, old):\n def get_id(n):\n n['Id'] = ''\n strToHash=json.dumps(n, sort_keys=True).replace('\"Name\": \"prefix\"', '\"Name\": \"Prefix\"').replace('\"Name\": \"suffix\"', '\"Name\": \"Suffix\"')\n return f\"{stack_id}-{hash(strToHash)}\"\n def with_id(n):\n n['Id'] = get_id(n)\n return n\n\n external_notifications = {}\n existing_notifications = s3.get_bucket_notification_configuration(Bucket=bucket)\n for t in CONFIGURATION_TYPES:\n if request_type == 'Update':\n old_incoming_ids = [get_id(n) for n in old.get(t, [])]\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not get_id(n) in old_incoming_ids] \n elif request_type == 'Delete':\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n elif request_type == 'Create':\n external_notifications[t] = [n for n in existing_notifications.get(t, [])]\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n if request_type == 'Delete':\n return external_notifications\n\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: 
{context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))"
},
"Handler": "index.handler",
"Role": {
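For readability: the functional change buried in the inline handler above is that the custom resource now reads a `SkipDestinationValidation` resource property and forwards it to S3. Below is a standalone sketch of the equivalent boto3 call; the bucket name and queue ARN are placeholders, not values from this change:

```python
# Standalone sketch of the S3 call the notifications handler now makes.
# Bucket name and queue ARN are placeholders for illustration only.
import boto3

s3 = boto3.client("s3")

s3.put_bucket_notification_configuration(
    Bucket="example-bucket",
    NotificationConfiguration={
        "QueueConfigurations": [
            {
                "Id": "example-rule",
                "QueueArn": "arn:aws:sqs:us-east-1:111111111111:example-queue",
                "Events": ["s3:ObjectCreated:*"],
            }
        ]
    },
    # When True, S3 skips sending the s3:TestEvent (or performing the Lambda
    # dry-run invocation) that it normally uses to validate the destination.
    SkipDestinationValidation=True,
)
```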
