From 1e2cff19eec234e1d1f7f501230cba01b220a09b Mon Sep 17 00:00:00 2001 From: paulhcsun <47882901+paulhcsun@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:51:40 -0700 Subject: [PATCH] chore(kinesisfirehose-alpha): replace `destinations` property with `destination` and change type from array to single IDestination (#31630) ### Reason for this change Setting a destination for your Delivery Stream was previously done by passing in an array of Destinations but with a restriction that there could only be one Destination in that array. This property type does not make sense for the current user experience (have an array but can only specify one destination) and also does not align with the behaviour in the AWS Console which only allows you to select a single destination. If Kinesis Firehose ever supports multiple destinations in the future then we can add a new property to support that which will not be a breaking change. ### Description of changes BREAKING CHANGE: replaced `destinations` property with `destination` (singular) and changed the type from array of Destinations to a single Destination. Old behaviour would only allow an array with a single Destination to be passed in anyway. ### Description of how you validated changes unit tests + no integ snapshot changes. 
### Checklist - [x] My code adheres to the [CONTRIBUTING GUIDE](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md) and [DESIGN GUIDELINES](https://github.com/aws/aws-cdk/blob/main/docs/DESIGN_GUIDELINES.md) ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-iot-actions-alpha/README.md | 2 +- .../integ.firehose-put-record-action.ts | 2 +- .../aws-kinesisfirehose-alpha/README.md | 37 ++++++------ .../lib/delivery-stream.ts | 12 +--- .../test/delivery-stream.test.ts | 57 ++++++++---------- .../integ.delivery-stream.source-stream.ts | 4 +- .../test/integ.delivery-stream.ts | 4 +- .../integ.kinesis-stream-events-target.ts | 2 +- .../test/integ.s3-bucket.lit.ts | 8 +-- .../test/s3-bucket.test.ts | 60 +++++++++---------- packages/aws-cdk-lib/aws-logs/README.md | 2 +- 11 files changed, 86 insertions(+), 104 deletions(-) diff --git a/packages/@aws-cdk/aws-iot-actions-alpha/README.md b/packages/@aws-cdk/aws-iot-actions-alpha/README.md index c1a01bc1b10c2..3cd0a687f723d 100644 --- a/packages/@aws-cdk/aws-iot-actions-alpha/README.md +++ b/packages/@aws-cdk/aws-iot-actions-alpha/README.md @@ -243,7 +243,7 @@ import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations-alpha'; const bucket = new s3.Bucket(this, 'MyBucket'); const stream = new firehose.DeliveryStream(this, 'MyStream', { - destinations: [new destinations.S3Bucket(bucket)], + destination: new destinations.S3Bucket(bucket), }); const topicRule = new iot.TopicRule(this, 'TopicRule', { diff --git a/packages/@aws-cdk/aws-iot-actions-alpha/test/kinesis-firehose/integ.firehose-put-record-action.ts b/packages/@aws-cdk/aws-iot-actions-alpha/test/kinesis-firehose/integ.firehose-put-record-action.ts index d506a2d78e8f0..8dfb68be0d7ea 100644 --- a/packages/@aws-cdk/aws-iot-actions-alpha/test/kinesis-firehose/integ.firehose-put-record-action.ts +++ 
b/packages/@aws-cdk/aws-iot-actions-alpha/test/kinesis-firehose/integ.firehose-put-record-action.ts @@ -21,7 +21,7 @@ class TestStack extends cdk.Stack { removalPolicy: cdk.RemovalPolicy.DESTROY, }); const stream = new firehose.DeliveryStream(this, 'MyStream', { - destinations: [new destinations.S3Bucket(bucket)], + destination: new destinations.S3Bucket(bucket), }); topicRule.addAction( new actions.FirehosePutRecordAction(stream, { diff --git a/packages/@aws-cdk/aws-kinesisfirehose-alpha/README.md b/packages/@aws-cdk/aws-kinesisfirehose-alpha/README.md index 31f8195003d7f..c299eb018e3fb 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-alpha/README.md +++ b/packages/@aws-cdk/aws-kinesisfirehose-alpha/README.md @@ -41,7 +41,7 @@ used as a destination. More supported destinations are covered [below](#destinat ```ts const bucket = new s3.Bucket(this, 'Bucket'); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [new destinations.S3Bucket(bucket)], + destination: new destinations.S3Bucket(bucket), }); ``` @@ -71,7 +71,7 @@ declare const destination: firehose.IDestination; const sourceStream = new kinesis.Stream(this, 'Source Stream'); new firehose.DeliveryStream(this, 'Delivery Stream', { sourceStream: sourceStream, - destinations: [destination], + destination: destination, }); ``` @@ -108,7 +108,7 @@ declare const bucket: s3.Bucket; const s3Destination = new destinations.S3Bucket(bucket); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [s3Destination], + destination: s3Destination, }); ``` @@ -154,18 +154,18 @@ declare const destination: firehose.IDestination; // SSE with an AWS-owned key new firehose.DeliveryStream(this, 'Delivery Stream AWS Owned', { encryption: firehose.StreamEncryption.awsOwnedKey(), - destinations: [destination], + destination: destination, }); // SSE with an customer-managed key that is created automatically by the CDK new firehose.DeliveryStream(this, 'Delivery Stream Implicit Customer Managed', { 
encryption: firehose.StreamEncryption.customerManagedKey(), - destinations: [destination], + destination: destination, }); // SSE with an customer-managed key that is explicitly specified declare const key: kms.Key; new firehose.DeliveryStream(this, 'Delivery Stream Explicit Customer Managed', { encryption: firehose.StreamEncryption.customerManagedKey(key), - destinations: [destination], + destination: destination, }); ``` @@ -196,7 +196,7 @@ const destination = new destinations.S3Bucket(bucket, { }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [destination], + destination: destination, }); ``` @@ -208,7 +208,7 @@ const destination = new destinations.S3Bucket(bucket, { loggingConfig: new destinations.DisableLogging(), }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [destination], + destination: destination, }); ``` @@ -271,7 +271,7 @@ const s3Destination = new destinations.S3Bucket(bucket, { compression: destinations.Compression.SNAPPY, }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [s3Destination], + destination: s3Destination, }); ``` @@ -292,7 +292,7 @@ const destination = new destinations.S3Bucket(bucket, { bufferingSize: Size.mebibytes(8), }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [destination], + destination: destination, }); ``` @@ -309,7 +309,7 @@ const destination = new destinations.S3Bucket(bucket, { bufferingInterval: Duration.seconds(0), }); new firehose.DeliveryStream(this, 'ZeroBufferDeliveryStream', { - destinations: [destination], + destination: destination, }); ``` @@ -332,7 +332,7 @@ const destination = new destinations.S3Bucket(bucket, { encryptionKey: key, }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [destination], + destination: destination, }); ``` @@ -350,35 +350,32 @@ backed up to S3. // Enable backup of all source records (to an S3 bucket created by CDK). 
declare const bucket: s3.Bucket; new firehose.DeliveryStream(this, 'Delivery Stream Backup All', { - destinations: [ + destination: new destinations.S3Bucket(bucket, { s3Backup: { mode: destinations.BackupMode.ALL, }, }), - ], }); // Explicitly provide an S3 bucket to which all source records will be backed up. declare const backupBucket: s3.Bucket; new firehose.DeliveryStream(this, 'Delivery Stream Backup All Explicit Bucket', { - destinations: [ + destination: new destinations.S3Bucket(bucket, { s3Backup: { bucket: backupBucket, }, }), - ], }); // Explicitly provide an S3 prefix under which all source records will be backed up. new firehose.DeliveryStream(this, 'Delivery Stream Backup All Explicit Prefix', { - destinations: [ + destination: new destinations.S3Bucket(bucket, { s3Backup: { mode: destinations.BackupMode.ALL, dataOutputPrefix: 'mybackup', }, }), - ], }); ``` @@ -431,7 +428,7 @@ const s3Destination = new destinations.S3Bucket(bucket, { processor: lambdaProcessor, }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [s3Destination], + destination: s3Destination, }); ``` @@ -473,7 +470,7 @@ const destinationRole = new iam.Role(this, 'Destination Role', { declare const bucket: s3.Bucket; const destination = new destinations.S3Bucket(bucket, { role: destinationRole }); new firehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [destination], + destination: destination, role: deliveryStreamRole, }); ``` diff --git a/packages/@aws-cdk/aws-kinesisfirehose-alpha/lib/delivery-stream.ts b/packages/@aws-cdk/aws-kinesisfirehose-alpha/lib/delivery-stream.ts index 4c97fa0aa6e96..737ba07d80574 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-alpha/lib/delivery-stream.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-alpha/lib/delivery-stream.ts @@ -185,11 +185,9 @@ export enum StreamEncryptionType { */ export interface DeliveryStreamProps { /** - * The destinations that this delivery stream will deliver data to. 
- * - * Only a singleton array is supported at this time. + * The destination that this delivery stream will deliver data to. */ - readonly destinations: IDestination[]; + readonly destination: IDestination; /** * A name for the delivery stream. @@ -324,10 +322,6 @@ export class DeliveryStream extends DeliveryStreamBase { this._role = props.role; - if (props.destinations.length !== 1) { - throw new Error(`Only one destination is allowed per delivery stream, given ${props.destinations.length}`); - } - if (props.encryption?.encryptionKey || props.sourceStream) { this._role = this._role ?? new iam.Role(this, 'Service Role', { assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'), @@ -369,7 +363,7 @@ export class DeliveryStream extends DeliveryStreamBase { readStreamGrant = props.sourceStream.grantRead(this._role); } - const destinationConfig = props.destinations[0].bind(this, {}); + const destinationConfig = props.destination.bind(this, {}); const resource = new CfnDeliveryStream(this, 'Resource', { deliveryStreamEncryptionConfigurationInput: encryptionConfig, diff --git a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/delivery-stream.test.ts b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/delivery-stream.test.ts index 6d4a7163be537..c9365eb2ee35e 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/delivery-stream.test.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/delivery-stream.test.ts @@ -46,7 +46,7 @@ describe('delivery stream', () => { test('creates stream with default values', () => { new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -63,7 +63,7 @@ describe('delivery stream', () => { test('creates stream with events target V2 class', () => { const stream = new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [mockS3Destination], + 
destination: mockS3Destination, }); new events.Rule(stack, 'rule', { @@ -102,7 +102,7 @@ describe('delivery stream', () => { }); const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, role: role, }); @@ -111,7 +111,7 @@ describe('delivery stream', () => { test('not providing sourceStream or encryptionKey creates only one role (used for S3 destination)', () => { new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); Template.fromStack(stack).hasResourceProperties('AWS::IAM::Role', { @@ -133,7 +133,7 @@ describe('delivery stream', () => { const sourceStream = new kinesis.Stream(stack, 'Source Stream'); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, sourceStream: sourceStream, }); @@ -156,7 +156,7 @@ describe('delivery stream', () => { const key = new kms.Key(stack, 'Key'); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: StreamEncryption.customerManagedKey(key), }); @@ -179,7 +179,7 @@ describe('delivery stream', () => { const sourceStream = new kinesis.Stream(stack, 'Source Stream'); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, sourceStream: sourceStream, role: deliveryStreamRole, }); @@ -215,7 +215,7 @@ describe('delivery stream', () => { test('requesting customer-owned encryption creates key and configuration', () => { new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: firehose.StreamEncryption.customerManagedKey(), role: deliveryStreamRole, }); @@ -251,7 +251,7 @@ describe('delivery stream', () => { const key = new kms.Key(stack, 'Key'); new 
firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: StreamEncryption.customerManagedKey(key), role: deliveryStreamRole, }); @@ -281,7 +281,7 @@ describe('delivery stream', () => { test('requesting AWS-owned key does not create key and creates configuration', () => { new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: firehose.StreamEncryption.awsOwnedKey(), role: deliveryStreamRole, }); @@ -299,7 +299,7 @@ describe('delivery stream', () => { test('requesting no encryption creates no configuration', () => { new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: firehose.StreamEncryption.unencrypted(), role: deliveryStreamRole, }); @@ -316,17 +316,17 @@ describe('delivery stream', () => { const sourceStream = new kinesis.Stream(stack, 'Source Stream'); expect(() => new firehose.DeliveryStream(stack, 'Delivery Stream 1', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: firehose.StreamEncryption.awsOwnedKey(), sourceStream, })).toThrowError('Requested server-side encryption but delivery stream source is a Kinesis data stream. Specify server-side encryption on the data stream instead.'); expect(() => new firehose.DeliveryStream(stack, 'Delivery Stream 2', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: firehose.StreamEncryption.customerManagedKey(), sourceStream, })).toThrowError('Requested server-side encryption but delivery stream source is a Kinesis data stream. 
Specify server-side encryption on the data stream instead.'); expect(() => new firehose.DeliveryStream(stack, 'Delivery Stream 3', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: StreamEncryption.customerManagedKey(new kms.Key(stack, 'Key')), sourceStream, })).toThrowError('Requested server-side encryption but delivery stream source is a Kinesis data stream. Specify server-side encryption on the data stream instead.'); @@ -337,7 +337,7 @@ describe('delivery stream', () => { assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), }); const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); deliveryStream.grant(role, 'firehose:PutRecord'); @@ -360,7 +360,7 @@ describe('delivery stream', () => { assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), }); const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); deliveryStream.grantPutRecords(role); @@ -385,7 +385,7 @@ describe('delivery stream', () => { const dependableId = stack.resolve((Node.of(dependable).defaultChild as cdk.CfnResource).logicalId); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); Template.fromStack(stack).hasResource('AWS::KinesisFirehose::DeliveryStream', { @@ -396,18 +396,9 @@ describe('delivery stream', () => { }); }); - test('supplying 0 or multiple destinations throws', () => { - expect(() => new firehose.DeliveryStream(stack, 'No Destinations', { - destinations: [], - })).toThrowError(/Only one destination is allowed per delivery stream/); - expect(() => new firehose.DeliveryStream(stack, 'Too Many Destinations', { - destinations: [mockS3Destination, mockS3Destination], - })).toThrowError(/Only one destination is allowed per delivery stream/); - }); - test('creating new 
stream should return IAM role when calling getter for grantPrincipal (backwards compatibility)', () => { const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); expect(deliveryStream.grantPrincipal).toBeInstanceOf(iam.Role); }); @@ -418,7 +409,7 @@ describe('delivery stream', () => { beforeEach(() => { stack = new cdk.Stack(undefined, undefined, { env: { account: '000000000000', region: 'us-west-1' } }); deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); }); @@ -516,7 +507,7 @@ describe('delivery stream', () => { const vpc = new ec2.Vpc(stack, 'VPC'); const securityGroup = new ec2.SecurityGroup(stack, 'Security Group', { vpc }); const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); securityGroup.connections.allowFrom(deliveryStream, ec2.Port.allTcp()); @@ -542,7 +533,7 @@ describe('delivery stream', () => { const vpc = new ec2.Vpc(stack, 'VPC'); const securityGroup = new ec2.SecurityGroup(stack, 'Security Group', { vpc }); const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); securityGroup.connections.allowFrom(deliveryStream, ec2.Port.allTcp()); @@ -558,10 +549,10 @@ describe('delivery stream', () => { test('only adds one Firehose IP address mapping to stack even if multiple delivery streams defined', () => { new firehose.DeliveryStream(stack, 'Delivery Stream 1', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); new firehose.DeliveryStream(stack, 'Delivery Stream 2', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); Template.fromStack(stack).hasMapping('*', { @@ -573,7 +564,7 @@ describe('delivery stream', () => { 
test('can add tags', () => { const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); cdk.Tags.of(deliveryStream).add('tagKey', 'tagValue'); diff --git a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.source-stream.ts b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.source-stream.ts index a9455bedabf6e..facfd13a184d1 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.source-stream.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.source-stream.ts @@ -34,12 +34,12 @@ const mockS3Destination: firehose.IDestination = { const sourceStream = new kinesis.Stream(stack, 'Source Stream'); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, sourceStream, }); new firehose.DeliveryStream(stack, 'Delivery Stream No Source Or Encryption Key', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); app.synth(); diff --git a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.ts b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.ts index 52bacf832a664..633e1fc0b5b2d 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.delivery-stream.ts @@ -36,12 +36,12 @@ const key = new kms.Key(stack, 'Key', { }); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [mockS3Destination], + destination: mockS3Destination, encryption: firehose.StreamEncryption.customerManagedKey(key), }); new firehose.DeliveryStream(stack, 'Delivery Stream No Source Or Encryption Key', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); app.synth(); diff --git 
a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.kinesis-stream-events-target.ts b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.kinesis-stream-events-target.ts index c3f39133b0b28..a3d65c30c589d 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.kinesis-stream-events-target.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-alpha/test/integ.kinesis-stream-events-target.ts @@ -34,7 +34,7 @@ const mockS3Destination: firehose.IDestination = { }; const stream = new firehose.DeliveryStream(stack, 'Delivery Stream No Source Or Encryption Key', { - destinations: [mockS3Destination], + destination: mockS3Destination, }); new events.Rule(stack, 'rule', { diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/integ.s3-bucket.lit.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/integ.s3-bucket.lit.ts index d86ce2aac648b..ce115b9af328e 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/integ.s3-bucket.lit.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/integ.s3-bucket.lit.ts @@ -45,7 +45,7 @@ const backupKey = new kms.Key(stack, 'BackupKey', { }); new firehose.DeliveryStream(stack, 'Delivery Stream', { - destinations: [new destinations.S3Bucket(bucket, { + destination: new destinations.S3Bucket(bucket, { loggingConfig: new destinations.EnableLogging(logGroup), processor: processor, compression: destinations.Compression.GZIP, @@ -64,16 +64,16 @@ new firehose.DeliveryStream(stack, 'Delivery Stream', { bufferingSize: cdk.Size.mebibytes(1), encryptionKey: backupKey, }, - })], + }), }); new firehose.DeliveryStream(stack, 'ZeroBufferingDeliveryStream', { - destinations: [new destinations.S3Bucket(bucket, { + destination: new destinations.S3Bucket(bucket, { compression: destinations.Compression.GZIP, dataOutputPrefix: 'regularPrefix', errorOutputPrefix: 'errorPrefix', bufferingInterval: cdk.Duration.seconds(0), - })], + }), }); app.synth(); diff --git 
a/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/s3-bucket.test.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/s3-bucket.test.ts index 43d385e72036e..9de3d5281fcf9 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/s3-bucket.test.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations-alpha/test/s3-bucket.test.ts @@ -23,7 +23,7 @@ describe('S3 destination', () => { it('provides defaults when no configuration is provided', () => { new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { role: destinationRole })], + destination: new firehosedestinations.S3Bucket(bucket, { role: destinationRole }), }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -42,7 +42,7 @@ describe('S3 destination', () => { it('creates a role when none is provided', () => { new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket)], + destination: new firehosedestinations.S3Bucket(bucket), }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -74,7 +74,7 @@ describe('S3 destination', () => { const destination = new firehosedestinations.S3Bucket(bucket, { role: destinationRole }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { @@ -111,7 +111,7 @@ describe('S3 destination', () => { role: destinationRole, loggingConfig: new firehosedestinations.EnableLogging(logGroup), }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { @@ -157,7 +157,7 @@ describe('S3 destination', () => { describe('logging', () => { it('creates resources and configuration by default', () => 
{ new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket)], + destination: new firehosedestinations.S3Bucket(bucket), }); Template.fromStack(stack).resourceCountIs('AWS::Logs::LogGroup', 1); @@ -173,7 +173,7 @@ describe('S3 destination', () => { it('does not create resources or configuration if disabled', () => { new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { loggingConfig: new firehosedestinations.DisableLogging() })], + destination: new firehosedestinations.S3Bucket(bucket, { loggingConfig: new firehosedestinations.DisableLogging() }), }); Template.fromStack(stack).resourceCountIs('AWS::Logs::LogGroup', 0); @@ -188,7 +188,7 @@ describe('S3 destination', () => { const logGroup = new logs.LogGroup(stack, 'Log Group'); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { loggingConfig: new firehosedestinations.EnableLogging(logGroup) })], + destination: new firehosedestinations.S3Bucket(bucket, { loggingConfig: new firehosedestinations.EnableLogging(logGroup) }), }); Template.fromStack(stack).resourceCountIs('AWS::Logs::LogGroup', 1); @@ -206,9 +206,9 @@ describe('S3 destination', () => { const logGroup = new logs.LogGroup(stack, 'Log Group'); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { loggingConfig: new firehosedestinations.EnableLogging(logGroup), role: destinationRole, - })], + }), }); Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { @@ -249,7 +249,7 @@ describe('S3 destination', () => { it('creates configuration for LambdaFunctionProcessor', () => { new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destinationWithBasicLambdaProcessor], + destination: destinationWithBasicLambdaProcessor, }); 
Template.fromStack(stack).resourceCountIs('AWS::Lambda::Function', 1); @@ -286,7 +286,7 @@ describe('S3 destination', () => { processor: processor, }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).resourceCountIs('AWS::Lambda::Function', 1); @@ -326,7 +326,7 @@ describe('S3 destination', () => { it('grants invoke access to the lambda function and delivery stream depends on grant', () => { new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destinationWithBasicLambdaProcessor], + destination: destinationWithBasicLambdaProcessor, }); Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { @@ -357,7 +357,7 @@ describe('S3 destination', () => { compression: firehosedestinations.Compression.GZIP, }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -372,7 +372,7 @@ describe('S3 destination', () => { compression: firehosedestinations.Compression.of('SNAZZY'), }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -386,10 +386,10 @@ describe('S3 destination', () => { describe('buffering', () => { it('creates configuration when interval and size provided', () => { new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { bufferingInterval: cdk.Duration.minutes(1), bufferingSize: cdk.Size.mebibytes(1), - })], + }), }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -404,27 +404,27 @@ describe('S3 destination', () => { it('validates bufferingInterval', () => { 
expect(() => new firehose.DeliveryStream(stack, 'DeliveryStream2', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { bufferingInterval: cdk.Duration.minutes(16), bufferingSize: cdk.Size.mebibytes(1), - })], + }), })).toThrowError('Buffering interval must be less than 900 seconds. Buffering interval provided was 960 seconds.'); }); it('validates bufferingSize', () => { expect(() => new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { bufferingInterval: cdk.Duration.minutes(1), bufferingSize: cdk.Size.mebibytes(0), - })], + }), })).toThrowError('Buffering size must be between 1 and 128 MiBs. Buffering size provided was 0 MiBs'); expect(() => new firehose.DeliveryStream(stack, 'DeliveryStream2', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { bufferingInterval: cdk.Duration.minutes(1), bufferingSize: cdk.Size.mebibytes(256), - })], + }), })).toThrowError('Buffering size must be between 1 and 128 MiBs. 
Buffering size provided was 256 MiBs'); }); }); @@ -434,10 +434,10 @@ describe('S3 destination', () => { const key = new kms.Key(stack, 'Key'); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { encryptionKey: key, role: destinationRole, - })], + }), }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -455,10 +455,10 @@ describe('S3 destination', () => { const key = new kms.Key(stack, 'Key'); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [new firehosedestinations.S3Bucket(bucket, { + destination: new firehosedestinations.S3Bucket(bucket, { encryptionKey: key, role: destinationRole, - })], + }), }); Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { @@ -488,7 +488,7 @@ describe('S3 destination', () => { }, }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -513,7 +513,7 @@ describe('S3 destination', () => { }, }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { @@ -542,7 +542,7 @@ describe('S3 destination', () => { it('by default does not create resources', () => { const destination = new firehosedestinations.S3Bucket(bucket); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); Template.fromStack(stack).resourceCountIs('AWS::S3::Bucket', 1); @@ -572,7 +572,7 @@ describe('S3 destination', () => { }, }); new firehose.DeliveryStream(stack, 'DeliveryStream', { - destinations: [destination], + destination: destination, }); 
Template.fromStack(stack).hasResourceProperties('AWS::KinesisFirehose::DeliveryStream', { diff --git a/packages/aws-cdk-lib/aws-logs/README.md b/packages/aws-cdk-lib/aws-logs/README.md index 52dd9aaf84b1f..ef32158b051e5 100644 --- a/packages/aws-cdk-lib/aws-logs/README.md +++ b/packages/aws-cdk-lib/aws-logs/README.md @@ -417,7 +417,7 @@ const bucket = new s3.Bucket(this, 'audit-bucket'); const s3Destination = new destinations.S3Bucket(bucket); const deliveryStream = new kinesisfirehose.DeliveryStream(this, 'Delivery Stream', { - destinations: [s3Destination], + destination: s3Destination, }); const dataProtectionPolicy = new logs.DataProtectionPolicy({