fix: custom resources log sensitive ResponseURL field (aws#20899)
All custom resource implementations start by logging the input `event`, so that issues will be easier to diagnose.

Part of the payload of this event is the `ResponseURL` field, a pre-signed S3 URL to which CloudFormation expects the success/failure result of the Custom Resource to be written. By logging the `ResponseURL` to CloudWatch, we open an attack vector: an attacker who can read the CloudWatch Logs can race the Custom Resource implementation and write a fake response to the pre-signed S3 URL, thereby faking the result of the Custom Resource (making the deployment fail when it should have succeeded, or succeed when it should have failed).

Remove the `ResponseURL` from all logging output, and have the Custom Resource Provider remove it from the Payload that gets sent to the user function as well. 
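
The handler-side change is the same one-line pattern everywhere: spread the incoming event and overwrite `ResponseURL` with a placeholder before logging. A minimal sketch of that pattern (the event interface here is simplified for illustration, not the framework's actual `OnEventRequest` type):

```ts
// Minimal sketch of the redacted-logging pattern applied to every handler.
// The event interface is simplified for illustration only.
interface CustomResourceEventLike {
  RequestType: 'Create' | 'Update' | 'Delete';
  ResponseURL: string;
  [key: string]: unknown;
}

export async function onEvent(event: CustomResourceEventLike): Promise<void> {
  // Replace the pre-signed S3 URL with a placeholder so it never lands in CloudWatch Logs.
  console.log('Event: %j', { ...event, ResponseURL: '...' });

  // ...rest of the handler logic...
}
```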

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
rix0rrr committed Jul 1, 2022
1 parent e502590 commit 6b4f92f
Showing 98 changed files with 339 additions and 586 deletions.


4 changes: 2 additions & 2 deletions packages/@aws-cdk/aws-dynamodb/lib/replica-handler/index.ts
@@ -3,7 +3,7 @@ import type { IsCompleteRequest, IsCompleteResponse, OnEventRequest, OnEventResp
import { DynamoDB } from 'aws-sdk'; // eslint-disable-line import/no-extraneous-dependencies

export async function onEventHandler(event: OnEventRequest): Promise<OnEventResponse> {
console.log('Event: %j', event);
console.log('Event: %j', { ...event, ResponseURL: '...' });

const dynamodb = new DynamoDB();

@@ -50,7 +50,7 @@ export async function onEventHandler(event: OnEventRequest): Promise<OnEventResp
}

export async function isCompleteHandler(event: IsCompleteRequest): Promise<IsCompleteResponse> {
console.log('Event: %j', event);
console.log('Event: %j', { ...event, ResponseURL: '...' });

const dynamodb = new DynamoDB();

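The other half of the fix described in the commit message — the Custom Resource Provider framework stripping `ResponseURL` out of the payload it forwards to the user function — is not part of the excerpt shown here. A rough sketch of the idea (the `invokeUserFunction` callback is hypothetical, standing in for however the provider actually invokes the handler):

```ts
// Hypothetical sketch only: drop ResponseURL before the event reaches user code,
// so the pre-signed URL cannot leak through the user function's own logging.
type ProviderEvent = { ResponseURL: string; [key: string]: unknown };

async function forwardSanitizedEvent(
  event: ProviderEvent,
  invokeUserFunction: (payload: unknown) => Promise<unknown>,
): Promise<unknown> {
  // Object rest drops ResponseURL from the copy that gets forwarded.
  const { ResponseURL: _redacted, ...sanitized } = event;
  return invokeUserFunction(sanitized);
}
```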



@@ -1,15 +1,15 @@
{
"version": "17.0.0",
"version": "20.0.0",
"files": {
"9abf2316979a565d3a86e43cce5b25037aee1b5cd3885835f8b3daec825b517e": {
"2cc27380df0d00a7348926c8fe9b6ae056ac18cee09087d824792e9611cde97e": {
"source": {
"path": "aws-ecs-integ.template.json",
"packaging": "file"
},
"destinations": {
"current_account-current_region": {
"bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}",
"objectKey": "9abf2316979a565d3a86e43cce5b25037aee1b5cd3885835f8b3daec825b517e.json",
"objectKey": "2cc27380df0d00a7348926c8fe9b6ae056ac18cee09087d824792e9611cde97e.json",
"assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}"
}
}
@@ -729,7 +729,7 @@
"Type": "AWS::Lambda::Function",
"Properties": {
"Code": {
"ZipFile": "import boto3, json, os, time\n\necs = boto3.client('ecs')\nautoscaling = boto3.client('autoscaling')\n\n\ndef lambda_handler(event, context):\n print(json.dumps(event))\n cluster = os.environ['CLUSTER']\n snsTopicArn = event['Records'][0]['Sns']['TopicArn']\n lifecycle_event = json.loads(event['Records'][0]['Sns']['Message'])\n instance_id = lifecycle_event.get('EC2InstanceId')\n if not instance_id:\n print('Got event without EC2InstanceId: %s', json.dumps(event))\n return\n\n instance_arn = container_instance_arn(cluster, instance_id)\n print('Instance %s has container instance ARN %s' % (lifecycle_event['EC2InstanceId'], instance_arn))\n\n if not instance_arn:\n return\n\n task_arns = container_instance_task_arns(cluster, instance_arn)\n \n if task_arns:\n print('Instance ARN %s has task ARNs %s' % (instance_arn, ', '.join(task_arns)))\n\n while has_tasks(cluster, instance_arn, task_arns):\n time.sleep(10)\n\n try:\n print('Terminating instance %s' % instance_id)\n autoscaling.complete_lifecycle_action(\n LifecycleActionResult='CONTINUE',\n **pick(lifecycle_event, 'LifecycleHookName', 'LifecycleActionToken', 'AutoScalingGroupName'))\n except Exception as e:\n # Lifecycle action may have already completed.\n print(str(e))\n\n\ndef container_instance_arn(cluster, instance_id):\n \"\"\"Turn an instance ID into a container instance ARN.\"\"\"\n arns = ecs.list_container_instances(cluster=cluster, filter='ec2InstanceId==' + instance_id)['containerInstanceArns']\n if not arns:\n return None\n return arns[0]\n\ndef container_instance_task_arns(cluster, instance_arn):\n \"\"\"Fetch tasks for a container instance ARN.\"\"\"\n arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']\n return arns\n\ndef has_tasks(cluster, instance_arn, task_arns):\n \"\"\"Return True if the instance is running tasks for the given cluster.\"\"\"\n instances = ecs.describe_container_instances(cluster=cluster, containerInstances=[instance_arn])['containerInstances']\n if not instances:\n return False\n instance = instances[0]\n\n if instance['status'] == 'ACTIVE':\n # Start draining, then try again later\n set_container_instance_to_draining(cluster, instance_arn)\n return True\n\n task_count = None\n\n if task_arns:\n # Fetch details for tasks running on the container instance\n tasks = ecs.describe_tasks(cluster=cluster, tasks=task_arns)['tasks']\n if tasks:\n # Consider any non-stopped tasks as running\n task_count = sum(task['lastStatus'] != 'STOPPED' for task in tasks) + instance['pendingTasksCount']\n \n if not task_count:\n # Fallback to instance task counts if detailed task information is unavailable\n task_count = instance['runningTasksCount'] + instance['pendingTasksCount']\n \n print('Instance %s has %s tasks' % (instance_arn, task_count))\n\n return task_count > 0\n\ndef set_container_instance_to_draining(cluster, instance_arn):\n ecs.update_container_instances_state(\n cluster=cluster,\n containerInstances=[instance_arn], status='DRAINING')\n\n\ndef pick(dct, *keys):\n \"\"\"Pick a subset of a dict.\"\"\"\n return {k: v for k, v in dct.items() if k in keys}\n"
"ZipFile": "import boto3, json, os, time\n\necs = boto3.client('ecs')\nautoscaling = boto3.client('autoscaling')\n\n\ndef lambda_handler(event, context):\n print(json.dumps(dict(event, ResponseURL='...')))\n cluster = os.environ['CLUSTER']\n snsTopicArn = event['Records'][0]['Sns']['TopicArn']\n lifecycle_event = json.loads(event['Records'][0]['Sns']['Message'])\n instance_id = lifecycle_event.get('EC2InstanceId')\n if not instance_id:\n print('Got event without EC2InstanceId: %s', json.dumps(dict(event, ResponseURL='...')))\n return\n\n instance_arn = container_instance_arn(cluster, instance_id)\n print('Instance %s has container instance ARN %s' % (lifecycle_event['EC2InstanceId'], instance_arn))\n\n if not instance_arn:\n return\n\n task_arns = container_instance_task_arns(cluster, instance_arn)\n\n if task_arns:\n print('Instance ARN %s has task ARNs %s' % (instance_arn, ', '.join(task_arns)))\n\n while has_tasks(cluster, instance_arn, task_arns):\n time.sleep(10)\n\n try:\n print('Terminating instance %s' % instance_id)\n autoscaling.complete_lifecycle_action(\n LifecycleActionResult='CONTINUE',\n **pick(lifecycle_event, 'LifecycleHookName', 'LifecycleActionToken', 'AutoScalingGroupName'))\n except Exception as e:\n # Lifecycle action may have already completed.\n print(str(e))\n\n\ndef container_instance_arn(cluster, instance_id):\n \"\"\"Turn an instance ID into a container instance ARN.\"\"\"\n arns = ecs.list_container_instances(cluster=cluster, filter='ec2InstanceId==' + instance_id)['containerInstanceArns']\n if not arns:\n return None\n return arns[0]\n\ndef container_instance_task_arns(cluster, instance_arn):\n \"\"\"Fetch tasks for a container instance ARN.\"\"\"\n arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']\n return arns\n\ndef has_tasks(cluster, instance_arn, task_arns):\n \"\"\"Return True if the instance is running tasks for the given cluster.\"\"\"\n instances = ecs.describe_container_instances(cluster=cluster, containerInstances=[instance_arn])['containerInstances']\n if not instances:\n return False\n instance = instances[0]\n\n if instance['status'] == 'ACTIVE':\n # Start draining, then try again later\n set_container_instance_to_draining(cluster, instance_arn)\n return True\n\n task_count = None\n\n if task_arns:\n # Fetch details for tasks running on the container instance\n tasks = ecs.describe_tasks(cluster=cluster, tasks=task_arns)['tasks']\n if tasks:\n # Consider any non-stopped tasks as running\n task_count = sum(task['lastStatus'] != 'STOPPED' for task in tasks) + instance['pendingTasksCount']\n\n if not task_count:\n # Fallback to instance task counts if detailed task information is unavailable\n task_count = instance['runningTasksCount'] + instance['pendingTasksCount']\n\n print('Instance %s has %s tasks' % (instance_arn, task_count))\n\n return task_count > 0\n\ndef set_container_instance_to_draining(cluster, instance_arn):\n ecs.update_container_instances_state(\n cluster=cluster,\n containerInstances=[instance_arn], status='DRAINING')\n\n\ndef pick(dct, *keys):\n \"\"\"Pick a subset of a dict.\"\"\"\n return {k: v for k, v in dct.items() if k in keys}\n"
},
"Role": {
"Fn::GetAtt": [
@@ -321,6 +321,12 @@
"data": "myServiceTaskDefTaskRole1C1DE6CC"
}
],
"/aws-ecs-integ/myService/TaskDef/TaskRole/DefaultPolicy/Resource": [
{
"type": "aws:cdk:logicalId",
"data": "myServiceTaskDefTaskRoleDefaultPolicyD48473C0"
}
],
"/aws-ecs-integ/myService/TaskDef/Resource": [
{
"type": "aws:cdk:logicalId",
@@ -9,7 +9,7 @@
"path": "Tree",
"constructInfo": {
"fqn": "constructs.Construct",
"version": "10.0.9"
"version": "10.1.33"
}
},
"aws-ecs-integ": {
@@ -1143,7 +1143,7 @@
"aws:cdk:cloudformation:type": "AWS::Lambda::Function",
"aws:cdk:cloudformation:props": {
"code": {
"zipFile": "import boto3, json, os, time\n\necs = boto3.client('ecs')\nautoscaling = boto3.client('autoscaling')\n\n\ndef lambda_handler(event, context):\n print(json.dumps(event))\n cluster = os.environ['CLUSTER']\n snsTopicArn = event['Records'][0]['Sns']['TopicArn']\n lifecycle_event = json.loads(event['Records'][0]['Sns']['Message'])\n instance_id = lifecycle_event.get('EC2InstanceId')\n if not instance_id:\n print('Got event without EC2InstanceId: %s', json.dumps(event))\n return\n\n instance_arn = container_instance_arn(cluster, instance_id)\n print('Instance %s has container instance ARN %s' % (lifecycle_event['EC2InstanceId'], instance_arn))\n\n if not instance_arn:\n return\n\n task_arns = container_instance_task_arns(cluster, instance_arn)\n \n if task_arns:\n print('Instance ARN %s has task ARNs %s' % (instance_arn, ', '.join(task_arns)))\n\n while has_tasks(cluster, instance_arn, task_arns):\n time.sleep(10)\n\n try:\n print('Terminating instance %s' % instance_id)\n autoscaling.complete_lifecycle_action(\n LifecycleActionResult='CONTINUE',\n **pick(lifecycle_event, 'LifecycleHookName', 'LifecycleActionToken', 'AutoScalingGroupName'))\n except Exception as e:\n # Lifecycle action may have already completed.\n print(str(e))\n\n\ndef container_instance_arn(cluster, instance_id):\n \"\"\"Turn an instance ID into a container instance ARN.\"\"\"\n arns = ecs.list_container_instances(cluster=cluster, filter='ec2InstanceId==' + instance_id)['containerInstanceArns']\n if not arns:\n return None\n return arns[0]\n\ndef container_instance_task_arns(cluster, instance_arn):\n \"\"\"Fetch tasks for a container instance ARN.\"\"\"\n arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']\n return arns\n\ndef has_tasks(cluster, instance_arn, task_arns):\n \"\"\"Return True if the instance is running tasks for the given cluster.\"\"\"\n instances = ecs.describe_container_instances(cluster=cluster, containerInstances=[instance_arn])['containerInstances']\n if not instances:\n return False\n instance = instances[0]\n\n if instance['status'] == 'ACTIVE':\n # Start draining, then try again later\n set_container_instance_to_draining(cluster, instance_arn)\n return True\n\n task_count = None\n\n if task_arns:\n # Fetch details for tasks running on the container instance\n tasks = ecs.describe_tasks(cluster=cluster, tasks=task_arns)['tasks']\n if tasks:\n # Consider any non-stopped tasks as running\n task_count = sum(task['lastStatus'] != 'STOPPED' for task in tasks) + instance['pendingTasksCount']\n \n if not task_count:\n # Fallback to instance task counts if detailed task information is unavailable\n task_count = instance['runningTasksCount'] + instance['pendingTasksCount']\n \n print('Instance %s has %s tasks' % (instance_arn, task_count))\n\n return task_count > 0\n\ndef set_container_instance_to_draining(cluster, instance_arn):\n ecs.update_container_instances_state(\n cluster=cluster,\n containerInstances=[instance_arn], status='DRAINING')\n\n\ndef pick(dct, *keys):\n \"\"\"Pick a subset of a dict.\"\"\"\n return {k: v for k, v in dct.items() if k in keys}\n"
"zipFile": "import boto3, json, os, time\n\necs = boto3.client('ecs')\nautoscaling = boto3.client('autoscaling')\n\n\ndef lambda_handler(event, context):\n print(json.dumps(dict(event, ResponseURL='...')))\n cluster = os.environ['CLUSTER']\n snsTopicArn = event['Records'][0]['Sns']['TopicArn']\n lifecycle_event = json.loads(event['Records'][0]['Sns']['Message'])\n instance_id = lifecycle_event.get('EC2InstanceId')\n if not instance_id:\n print('Got event without EC2InstanceId: %s', json.dumps(dict(event, ResponseURL='...')))\n return\n\n instance_arn = container_instance_arn(cluster, instance_id)\n print('Instance %s has container instance ARN %s' % (lifecycle_event['EC2InstanceId'], instance_arn))\n\n if not instance_arn:\n return\n\n task_arns = container_instance_task_arns(cluster, instance_arn)\n\n if task_arns:\n print('Instance ARN %s has task ARNs %s' % (instance_arn, ', '.join(task_arns)))\n\n while has_tasks(cluster, instance_arn, task_arns):\n time.sleep(10)\n\n try:\n print('Terminating instance %s' % instance_id)\n autoscaling.complete_lifecycle_action(\n LifecycleActionResult='CONTINUE',\n **pick(lifecycle_event, 'LifecycleHookName', 'LifecycleActionToken', 'AutoScalingGroupName'))\n except Exception as e:\n # Lifecycle action may have already completed.\n print(str(e))\n\n\ndef container_instance_arn(cluster, instance_id):\n \"\"\"Turn an instance ID into a container instance ARN.\"\"\"\n arns = ecs.list_container_instances(cluster=cluster, filter='ec2InstanceId==' + instance_id)['containerInstanceArns']\n if not arns:\n return None\n return arns[0]\n\ndef container_instance_task_arns(cluster, instance_arn):\n \"\"\"Fetch tasks for a container instance ARN.\"\"\"\n arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']\n return arns\n\ndef has_tasks(cluster, instance_arn, task_arns):\n \"\"\"Return True if the instance is running tasks for the given cluster.\"\"\"\n instances = ecs.describe_container_instances(cluster=cluster, containerInstances=[instance_arn])['containerInstances']\n if not instances:\n return False\n instance = instances[0]\n\n if instance['status'] == 'ACTIVE':\n # Start draining, then try again later\n set_container_instance_to_draining(cluster, instance_arn)\n return True\n\n task_count = None\n\n if task_arns:\n # Fetch details for tasks running on the container instance\n tasks = ecs.describe_tasks(cluster=cluster, tasks=task_arns)['tasks']\n if tasks:\n # Consider any non-stopped tasks as running\n task_count = sum(task['lastStatus'] != 'STOPPED' for task in tasks) + instance['pendingTasksCount']\n\n if not task_count:\n # Fallback to instance task counts if detailed task information is unavailable\n task_count = instance['runningTasksCount'] + instance['pendingTasksCount']\n\n print('Instance %s has %s tasks' % (instance_arn, task_count))\n\n return task_count > 0\n\ndef set_container_instance_to_draining(cluster, instance_arn):\n ecs.update_container_instances_state(\n cluster=cluster,\n containerInstances=[instance_arn], status='DRAINING')\n\n\ndef pick(dct, *keys):\n \"\"\"Pick a subset of a dict.\"\"\"\n return {k: v for k, v in dct.items() if k in keys}\n"
},
"role": {
"Fn::GetAtt": [
@@ -1240,7 +1240,7 @@
},
"constructInfo": {
"fqn": "constructs.Construct",
"version": "10.0.9"
"version": "10.1.33"
}
},
"LifecycleHookDrainHook": {
@@ -1735,6 +1735,54 @@
"fqn": "@aws-cdk/aws-iam.CfnRole",
"version": "0.0.0"
}
},
"DefaultPolicy": {
"id": "DefaultPolicy",
"path": "aws-ecs-integ/myService/TaskDef/TaskRole/DefaultPolicy",
"children": {
"Resource": {
"id": "Resource",
"path": "aws-ecs-integ/myService/TaskDef/TaskRole/DefaultPolicy/Resource",
"attributes": {
"aws:cdk:cloudformation:type": "AWS::IAM::Policy",
"aws:cdk:cloudformation:props": {
"policyDocument": {
"Statement": [
{
"Action": [
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"ssmmessages:CreateControlChannel",
"ssmmessages:CreateDataChannel",
"ssmmessages:OpenControlChannel",
"ssmmessages:OpenDataChannel"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"policyName": "myServiceTaskDefTaskRoleDefaultPolicyD48473C0",
"roles": [
{
"Ref": "myServiceTaskDefTaskRole1C1DE6CC"
}
]
}
},
"constructInfo": {
"fqn": "@aws-cdk/aws-iam.CfnPolicy",
"version": "0.0.0"
}
}
},
"constructInfo": {
"fqn": "@aws-cdk/aws-iam.Policy",
"version": "0.0.0"
}
}
},
"constructInfo": {
@@ -1942,6 +1990,7 @@
"minimumHealthyPercent": 50
},
"enableEcsManagedTags": false,
"enableExecuteCommand": true,
"healthCheckGracePeriodSeconds": 60,
"launchType": "EC2",
"loadBalancers": [
@@ -1,15 +1,15 @@
{
"version": "17.0.0",
"version": "20.0.0",
"files": {
"62462a0ca4f5e1fde18ddd2e2ad6537c111d2bd84b82653a8628047c552bca14": {
"ef02919701446f0e1c908b12e2e437448544c6ac7df2f65225cb0cd189188fd1": {
"source": {
"path": "aws-ecs-integ-ecs.template.json",
"packaging": "file"
},
"destinations": {
"current_account-current_region": {
"bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}",
"objectKey": "62462a0ca4f5e1fde18ddd2e2ad6537c111d2bd84b82653a8628047c552bca14.json",
"objectKey": "ef02919701446f0e1c908b12e2e437448544c6ac7df2f65225cb0cd189188fd1.json",
"assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}"
}
}