fix: adding deletion protection for load balancer (#39)
This commit enables deletion protection on the load balancers created by HealthMonitor and RenderQueue, and adds an optional deletionProtection property so customers can opt out.

* Updated the unit tests to validate the change.
* Deployed the kitchen-sink app and verified its functionality.
yashda authored Aug 6, 2020
1 parent 43a211b commit cda4954
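
For illustration, a minimal sketch (not part of this commit) of how the new property might be consumed from a CDK app. The App/Stack/Vpc scaffolding and construct IDs are assumptions; the opt-out mirrors the new 'validating deletion protection' unit test further down.

import { App, Stack } from '@aws-cdk/core';
import { Vpc } from '@aws-cdk/aws-ec2';
import { HealthMonitor } from 'aws-rfdk';

const app = new App();
const stack = new Stack(app, 'HealthMonitorStack');
const vpc = new Vpc(stack, 'Vpc');

// deletionProtection defaults to true; pass false to allow the stack's
// load balancers to be deleted without manual intervention.
new HealthMonitor(stack, 'HealthMonitor', {
  vpc,
  deletionProtection: false,
});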
Showing 7 changed files with 139 additions and 28 deletions.
15 changes: 13 additions & 2 deletions packages/aws-rfdk/lib/core/lib/health-monitor.ts
@@ -175,7 +175,7 @@ export interface HealthMonitorProps {

/**
* Describes the current Elastic Load Balancing resource limits for your AWS account.
* This object should be the output of 'describeAccountLimits' API
* This object should be the output of 'describeAccountLimits' API.
*
* @default default account limits for ALB is used
*
@@ -189,6 +189,17 @@
* @default A new Key will be created and used.
*/
readonly encryptionKey?: IKey;

/**
* Indicates whether deletion protection is enabled for the LoadBalancer.
*
* @default true
*
 * Note: This value is true by default, which means that deletion protection is enabled for the
 * load balancer. The user must therefore disable it via the AWS Console or CLI before deleting the stack.
* @see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection
*/
readonly deletionProtection?: boolean;
}

/**
@@ -389,7 +400,7 @@ export class HealthMonitor extends HealthMonitorBase {
const {loadBalancer, targetGroup} = this.lbFactory.registerWorkerFleet(
monitorableFleet,
healthCheckConfig,
this.props.elbAccountLimits);
this.props);

this.createFleetAlarms(monitorableFleet, healthCheckConfig, loadBalancer, targetGroup);
}
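
Because the new HealthMonitorProps.deletionProtection documented above defaults to true, tearing down a stack that keeps the default requires an extra step. A short sketch of that consequence, reusing the stack and vpc from the earlier sketch (illustrative only; the CLI command in the comment is the standard ELBv2 attribute update, not something added by this commit):

// With the property omitted, deletion protection stays enabled:
new HealthMonitor(stack, 'HealthMonitor', { vpc });

// Deleting this stack will fail until protection is disabled out-of-band,
// e.g. in the AWS Console or with the AWS CLI:
//   aws elbv2 modify-load-balancer-attributes \
//     --load-balancer-arn <load-balancer-arn> \
//     --attributes Key=deletion_protection.enabled,Value=false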
32 changes: 19 additions & 13 deletions packages/aws-rfdk/lib/core/lib/load-balancer-manager.ts
@@ -17,6 +17,7 @@ import {
HealthMonitor,
IMonitorableFleet,
Limit,
HealthMonitorProps,
} from './health-monitor';

/**
@@ -108,7 +109,7 @@ export class LoadBalancerFactory {
public registerWorkerFleet(
fleet: IMonitorableFleet,
healthCheckConfig: HealthCheckConfig,
elbAccountLimits?: Limit[]): {
healthMonitorProps: HealthMonitorProps): {
loadBalancer: ApplicationLoadBalancer,
listener: ApplicationListener,
targetGroup: ApplicationTargetGroup
@@ -126,7 +127,7 @@
loadBalancer,
fleet,
healthCheckConfig,
elbAccountLimits);
healthMonitorProps);

loadBalancerParent = loadBalancer;
listenerParent = listener;
@@ -149,7 +150,8 @@
// accommodate fleet, create a new one and register
loadBalancerParent = this.createLoadBalancer(
this.healthMonitorScope,
this.loadBalancerMap.size);
this.loadBalancerMap.size,
healthMonitorProps);
const loadBalancerManager = new LoadBalancerManager();

// Add it to the map
@@ -161,7 +163,7 @@
loadBalancerParent,
fleet,
healthCheckConfig,
elbAccountLimits);
healthMonitorProps);

listenerParent = listener;
targetGroupParent = targetGroup;
@@ -189,10 +191,14 @@
* @param scope
* @param loadBalancerindex
*/
private createLoadBalancer(scope: Construct, loadBalancerindex: number): ApplicationLoadBalancer {
private createLoadBalancer(scope: Construct,
loadBalancerindex: number,
healthMonitorProps: HealthMonitorProps,
): ApplicationLoadBalancer {
return new ApplicationLoadBalancer(scope, `ALB_${loadBalancerindex}`, {
vpc: this.vpc,
internetFacing: false,
deletionProtection: healthMonitorProps.deletionProtection ?? true,
});
}
}
@@ -222,7 +228,7 @@ class LoadBalancerManager {
loadBalancer: ApplicationLoadBalancer,
fleet: IMonitorableFleet,
healthCheckConfig: HealthCheckConfig,
elbAccountLimits?: Limit[]) {
healthMonitorProps: HealthMonitorProps) {

// this initializes with 0 and keeps the track of all components
// newly added down the hierarchy.
@@ -233,7 +239,7 @@
// check for target limit in load balancer
const targetPerLoadBalancerLimit = LoadBalancerFactory.getAccountLimit('targets-per-application-load-balancer',
LoadBalancerFactory.DEFAULT_TARGETS_PER_APPLICATION_LOAD_BALANCER,
elbAccountLimits);
healthMonitorProps.elbAccountLimits);
if ((this.loadBalancerComponentCount.targetCount + fleet.targetCapacity) > targetPerLoadBalancerLimit) {
throw new AWSLimitExhaustedError('AWS service limit "targets-per-application-load-balancer" reached. Limit: ' +
targetPerLoadBalancerLimit);
@@ -242,7 +248,7 @@
// check for target group limit in load balancer
const targetGroupsPerLoadBalancerLimit = LoadBalancerFactory.getAccountLimit('target-groups-per-application-load-balancer',
LoadBalancerFactory.DEFAULT_TARGET_GROUPS_PER_APPLICATION_LOAD_BALANCER,
elbAccountLimits);
healthMonitorProps.elbAccountLimits);
if ((this.loadBalancerComponentCount.targetGroupCount + 1) > targetGroupsPerLoadBalancerLimit) {
throw new AWSLimitExhaustedError('AWS service limit "target-groups-per-application-load-balancer" reached. Limit: ' +
targetGroupsPerLoadBalancerLimit);
@@ -260,7 +266,7 @@
listener,
fleet,
healthCheckConfig,
elbAccountLimits);
healthMonitorProps);

statsDelta.add(componentsAdded);
listenerParent = listener;
@@ -283,7 +289,7 @@

const listenersPerLoadBalancerLimit = LoadBalancerFactory.getAccountLimit('listeners-per-application-load-balancer',
LoadBalancerFactory.DEFAULT_LISTENERS_PER_APPLICATION_LOAD_BALANCER,
elbAccountLimits);
healthMonitorProps.elbAccountLimits);
if ((this.loadBalancerComponentCount.listenerCount + 1) > listenersPerLoadBalancerLimit) {
throw new AWSLimitExhaustedError('AWS service limit "listeners-per-application-load-balancer" reached. Limit: ' +
listenersPerLoadBalancerLimit);
@@ -301,7 +307,7 @@
listenerParent,
fleet,
healthCheckConfig,
elbAccountLimits);
healthMonitorProps);

targetGroupParent = targetGroup;
statsDelta.add(componentsAdded);
@@ -364,7 +370,7 @@ class ListenerManager {
listener: ApplicationListener,
fleet: IMonitorableFleet,
healthCheckConfig: HealthCheckConfig,
elbAccountLimits?: Limit[]) {
healthMonitorProps: HealthMonitorProps) {

const componentsAdded = new LoadBalancerComponentStats();

@@ -373,7 +379,7 @@
// check for target limit in listener
const targetGroupPerLoadBalancerLimit = LoadBalancerFactory.getAccountLimit('target-groups-per-action-on-application-load-balancer',
LoadBalancerFactory.DEFAULT_TARGET_GROUPS_PER_ACTION_ON_APPLICATION_LOAD_BALANCER,
elbAccountLimits);
healthMonitorProps.elbAccountLimits);
if ((this.listenerComponentCount.targetGroupCount + 1) > targetGroupPerLoadBalancerLimit) {
throw new AWSLimitExhaustedError('AWS service limit "target-groups-per-action-on-application-load-balancer" reached. Limit: ' +
targetGroupPerLoadBalancerLimit);
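
createLoadBalancer above resolves the flag with nullish coalescing, so protection is only turned off by an explicit false. A standalone sketch (not part of the diff) spelling out how that expression evaluates:

// Mirrors `healthMonitorProps.deletionProtection ?? true` in createLoadBalancer.
const resolveDeletionProtection = (deletionProtection?: boolean): boolean =>
  deletionProtection ?? true;

resolveDeletionProtection(undefined); // true  -- prop omitted, protection enabled
resolveDeletionProtection(true);      // true  -- explicitly enabled
resolveDeletionProtection(false);     // false -- the only way to opt out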
43 changes: 42 additions & 1 deletion packages/aws-rfdk/lib/core/test/health-monitor.test.ts
@@ -11,6 +11,7 @@ import {
expect as expectCDK,
haveResource,
haveResourceLike,
ABSENT,
} from '@aws-cdk/assert';
import {
AutoScalingGroup,
@@ -319,7 +320,15 @@ test('2 ASG gets registered to same LB', () => {
healthMonitor.registerFleet(fleet2, {port: 7171});

// THEN
expectCDK(hmStack).to(countResources('AWS::ElasticLoadBalancingV2::LoadBalancer', 1));
expectCDK(hmStack).to(countResourcesLike('AWS::ElasticLoadBalancingV2::LoadBalancer', 1, {
LoadBalancerAttributes: [
{
Key: 'deletion_protection.enabled',
Value: 'true',
},
],
Scheme: 'internal',
}));
expectCDK(wfStack).to(countResources('AWS::ElasticLoadBalancingV2::Listener', 2));
expectCDK(wfStack).to(haveResource('AWS::ElasticLoadBalancingV2::Listener'));
expectCDK(wfStack).to(haveResourceLike('AWS::ElasticLoadBalancingV2::TargetGroup', {
@@ -372,6 +381,12 @@ test('validating LB target limit', () => {

// THEN
expectCDK(hmStack).to(countResourcesLike('AWS::ElasticLoadBalancingV2::LoadBalancer', 2, {
LoadBalancerAttributes: [
{
Key: 'deletion_protection.enabled',
Value: 'true',
},
],
Scheme: 'internal',
Type: 'application',
}));
@@ -407,6 +422,12 @@ test('validating LB listener limit', () => {

// THEN
expectCDK(hmStack).to(countResourcesLike('AWS::ElasticLoadBalancingV2::LoadBalancer', 2, {
LoadBalancerAttributes: [
{
Key: 'deletion_protection.enabled',
Value: 'true',
},
],
Scheme: 'internal',
Type: 'application',
}));
@@ -466,4 +487,24 @@ test('validating target limit exhaustion', () => {
expect(() => {
healthMonitor.registerFleet(fleet, {});
}).toThrowError(/AWS service limit \"targets-per-application-load-balancer\" reached. Limit: 1/);
});

test('validating deletion protection', () => {
// WHEN
healthMonitor = new HealthMonitor(hmStack, 'healthMonitor2', {
vpc,
deletionProtection: false,
});

const fleet = new TestMonitorableFleet(wfStack, 'workerFleet', {
vpc,
});
healthMonitor.registerFleet(fleet, {});

// THEN
expectCDK(hmStack).to(haveResourceLike('AWS::ElasticLoadBalancingV2::LoadBalancer', {
LoadBalancerAttributes: ABSENT,
Scheme: 'internal',
Type: 'application',
}));
});
11 changes: 11 additions & 0 deletions packages/aws-rfdk/lib/deadline/lib/render-queue-ref.ts
@@ -262,6 +262,17 @@ export interface RenderQueueProps {
* @default - LogGroup will be created with all properties' default values and a prefix of "/renderfarm/".
*/
readonly logGroupProps?: LogGroupFactoryProps;

/**
* Indicates whether deletion protection is enabled for the LoadBalancer.
*
* @default true
*
 * Note: This value is true by default, which means that deletion protection is enabled for the
 * load balancer. The user must therefore disable it via the AWS Console or CLI before deleting the stack.
* @see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#deletion-protection
*/
readonly deletionProtection?: boolean;
}

/**
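
On the Deadline side, the same opt-out on RenderQueueProps might look like the sketch below. The other props (images, repository, and so on) are untouched by this commit and are assumed to be assembled elsewhere; only deletionProtection is new, and the import path is assumed from the package's deadline submodule.

import { Stack } from '@aws-cdk/core';
import { RenderQueue, RenderQueueProps } from 'aws-rfdk/deadline';

// Assumed to exist elsewhere: the stack and the unchanged required props.
declare const stack: Stack;
declare const baseRenderQueueProps: RenderQueueProps;

new RenderQueue(stack, 'RenderQueue', {
  ...baseRenderQueueProps,
  deletionProtection: false, // new in this commit; defaults to true when omitted
});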
8 changes: 7 additions & 1 deletion packages/aws-rfdk/lib/deadline/lib/render-queue.ts
@@ -318,14 +318,20 @@ export class RenderQueue extends RenderQueueBase implements IGrantable {
loadBalancerFQDN = `${label}.${props.hostname.zone.zoneName}`;
}

const loadBalancer = new ApplicationLoadBalancer(this, 'LB', {
vpc: this.cluster.vpc,
internetFacing: false,
deletionProtection: props.deletionProtection ?? true,
});

this.pattern = new ApplicationLoadBalancedEc2Service(this, 'AlbEc2ServicePattern', {
certificate: this.clientCert,
cluster: this.cluster,
desiredCount: props.renderQueueSize?.desired,
domainZone: props.hostname?.zone,
domainName: loadBalancerFQDN,
listenerPort: externalPortNumber,
publicLoadBalancer: false,
loadBalancer,
protocol: externalProtocol,
taskDefinition,
// This is required to right-size our host capacity and not have the ECS service block on updates. We set a memory