diff --git a/.gitignore b/.gitignore
index 450c0ffc94eaf..62e8f2120385e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,4 +28,4 @@ coverage/
 cdk.context.json
 .cdk.staging/
 cdk.out/
-
+*.tabl.json
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 86f9c0ccb0a8d..a3f8c1379d8c5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,6 +36,7 @@ and let us know if it's not up-to-date (even better, submit a PR with your corr
   - [Updating all Dependencies](#updating-all-dependencies)
   - [Running CLI integration tests](#running-cli-integration-tests)
   - [API Compatibility Checks](#api-compatibility-checks)
+  - [Examples](#examples)
   - [Feature Flags](#feature-flags)
 - [Troubleshooting](#troubleshooting)
 - [Debugging](#debugging)
@@ -515,6 +516,62 @@ this API we will not break anyone, because they weren't able to use it.
 The file `allowed-breaking-changes.txt` in the root of the repo is an exclusion
 file that can be used in these cases.
 
+### Examples
+
+Examples typed in fenced code blocks (looking like `'''ts`, but then with backticks
+instead of regular quotes) will be automatically extracted, compiled and translated
+to other languages when the bindings are generated.
+
+To successfully do that, they must be compilable. The easiest way to do that is using
+a *fixture*, which looks like this:
+
+```
+'''ts fixture=with-bucket
+bucket.addLifecycleTransition({ ... });
+'''
+```
+
+While processing the examples, the tool will look for a file called
+`rosetta/with-bucket.ts-fixture` in the package directory. This file will be
+treated as a regular TypeScript source file, but it must also contain the text
+`/// here`, at which point the example will be inserted. The complete file must
+compile properly.
+
+Before the `/// here` marker, the fixture should import the necessary packages
+and initialize the required variables.
+
+If no fixture is specified, the fixture with the name
+`rosetta/default.ts-fixture` will be used if present. `nofixture` can be used to
+opt out of that behavior.
+
+In an `@example` block, which is unfenced, the first line of the example can
+contain three slashes to achieve the same effect:
+
+```
+/**
+ * @example
+ * /// fixture=with-bucket
+ * bucket.addLifecycleTransition({ ... });
+ */
+```
+
+When including packages in your examples (even the package you're writing the
+examples for), use the full package name (e.g. `import s3 =
+require('@aws-cdk/aws-s3');`). The example will be compiled in an environment
+where all CDK packages are available using their public names. In this way,
+it's also possible to import packages that are not in the dependency set of
+the current package.
+
+For a practical example of how making sample code compilable works, see the
+`aws-ec2` package.
+
+Examples of all packages are extracted and compiled as part of the packaging
+step. If you are working on getting rid of example compilation errors of a
+single package, you can run `scripts/compile-samples` on the package by itself.
+
+For now, non-compiling examples will not yet block the build, but at some point
+in the future they will.
+
 ### Feature Flags
 
 Sometimes we want to introduce new breaking behavior because we believe this is
@@ -547,9 +604,9 @@ The pattern is simple:
 5. Under `BREAKING CHANGES` in your commit message describe this new behavior:
 
     ```
-    BREAKING CHANGE: template file names for new projects created through "cdk init"
-    will use the template artifact ID instead of the physical stack name to enable
-    multiple stacks to use the same name. This is enabled through the flag
+    BREAKING CHANGE: template file names for new projects created through "cdk init"
+    will use the template artifact ID instead of the physical stack name to enable
+    multiple stacks to use the same name. This is enabled through the flag
     `@aws-cdk/core:enableStackNameDuplicates` in newly generated `cdk.json` files.
     ```
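For illustration, a fixture such as the `with-bucket` one referenced in the section above would be an ordinary TypeScript file with a `/// here` marker. The sketch below is hypothetical (the S3 bucket setup is an assumption, and `with-bucket` itself is not added in this diff), but it mirrors the shape of the `aws-ec2` fixtures introduced further down:

```ts
// rosetta/with-bucket.ts-fixture (hypothetical sketch)
import { Construct, Stack } from '@aws-cdk/core';
import s3 = require('@aws-cdk/aws-s3');

class Fixture extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Declare everything the example body relies on before the marker
    const bucket = new s3.Bucket(this, 'Bucket');

    /// here
  }
}
```

The example tagged `fixture=with-bucket` is pasted in place of `/// here`, so `bucket` resolves and the assembled file has to compile as a whole.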
diff --git a/pack.sh b/pack.sh
index 199ad97a51c77..49166439a4777 100755
--- a/pack.sh
+++ b/pack.sh
@@ -25,22 +25,31 @@ function lerna_scopes() {
   done
 }
 
-echo "Packaging jsii modules" >&2
+# Compile examples with respect to "decdk" directory, as all packages will
+# be symlinked there so they can all be included.
+echo "Extracting code samples" >&2
+node --experimental-worker $(which jsii-rosetta) \
+    --compile \
+    --output samples.tabl.json \
+    --directory packages/decdk \
+    $(cat $TMPDIR/jsii.txt)
 
 # Jsii packaging (all at once using jsii-pacmak)
+echo "Packaging jsii modules" >&2
 jsii-pacmak \
     --verbose \
-    --outdir $distdir/ \
+    --rosetta-tablet samples.tabl.json \
     $(cat $TMPDIR/jsii.txt)
 
 # Non-jsii packaging, which means running 'package' in every individual
-# module and rsync'ing the result to the shared dist directory.
+# module
 echo "Packaging non-jsii modules" >&2
 lerna run $(lerna_scopes $(cat $TMPDIR/nonjsii.txt)) --sort --concurrency=1 --stream package
 
+# Finally rsync all 'dist' directories together into a global 'dist' directory
 for dir in $(find packages -name dist | grep -v node_modules | grep -v run-wrappers); do
-  echo "Merging ${dir} into ${distdir}"
-  rsync -av $dir/ ${distdir}/
+  echo "Merging ${dir} into ${distdir}" >&2
+  rsync -a $dir/ ${distdir}/
 done
 
 # Remove a JSII aggregate POM that may have snuk past
diff --git a/package.json b/package.json
index a4f14858117d8..9cbaa2f28b6e8 100644
--- a/package.json
+++ b/package.json
@@ -21,6 +21,7 @@
     "jest": "^24.9.0",
     "jsii-diff": "^0.20.2",
     "jsii-pacmak": "^0.20.2",
+    "jsii-rosetta": "^0.20.2",
     "lerna": "^3.18.4",
     "nodeunit": "^0.11.3",
     "nyc": "^14.1.1",
diff --git a/packages/@aws-cdk/aws-ec2/README.md b/packages/@aws-cdk/aws-ec2/README.md
index 62f44423eeaba..34d6dc84e2b9f 100644
--- a/packages/@aws-cdk/aws-ec2/README.md
+++ b/packages/@aws-cdk/aws-ec2/README.md
@@ -12,6 +12,10 @@
 The `@aws-cdk/aws-ec2` package contains primitives for setting up networking and
 instances.
 
+```ts nofixture
+import ec2 = require('@aws-cdk/aws-ec2');
+```
+
 ## VPC
 
 Most projects need a Virtual Private Cloud to provide security by means of
@@ -19,8 +23,6 @@ network partitioning. This is achieved by creating an instance of `Vpc`:
 
 ```ts
-import ec2 = require('@aws-cdk/aws-ec2');
-
 const vpc = new ec2.Vpc(this, 'VPC');
 ```
 
@@ -186,7 +188,6 @@
 by setting the `reserved` subnetConfiguration property to true, as shown below:
 
 ```ts
-import ec2 = require('@aws-cdk/aws-ec2');
 const vpc = new ec2.Vpc(this, 'TheVPC', {
   natGateways: 1,
   subnetConfiguration: [
@@ -263,7 +264,7 @@ which you can add egress traffic rules.
 
 You can manipulate Security Groups directly:
 
-```ts
+```ts fixture=with-vpc
 const mySecurityGroup = new ec2.SecurityGroup(this, 'SecurityGroup', {
   vpc,
   description: 'Allow ssh access to ec2 instances',
@@ -281,7 +282,7 @@ have security groups, you have to add an **Egress** rule to one Security Group,
 and an **Ingress** rule to the other.
 
 The connections object will automatically take care of this for you:
 
-```ts
+```ts fixture=conns
 // Allow connections from anywhere
 loadBalancer.connections.allowFromAnyIpv4(ec2.Port.tcp(443), 'Allow inbound HTTPS');
@@ -296,23 +297,23 @@ appFleet.connections.allowTo(dbFleet, ec2.Port.tcp(443), 'App can call database'
 
 There are various classes that implement the connection peer part:
 
-```ts
+```ts fixture=conns
 // Simple connection peers
 let peer = ec2.Peer.ipv4("10.0.0.0/16");
-let peer = ec2.Peer.anyIpv4();
-let peer = ec2.Peer.ipv6("::0/0");
-let peer = ec2.Peer.anyIpv6();
-let peer = ec2.Peer.prefixList("pl-12345");
-fleet.connections.allowTo(peer, ec2.Port.tcp(443), 'Allow outbound HTTPS');
+peer = ec2.Peer.anyIpv4();
+peer = ec2.Peer.ipv6("::0/0");
+peer = ec2.Peer.anyIpv6();
+peer = ec2.Peer.prefixList("pl-12345");
+appFleet.connections.allowTo(peer, ec2.Port.tcp(443), 'Allow outbound HTTPS');
 ```
 
 Any object that has a security group can itself be used as a connection peer:
 
-```ts
+```ts fixture=conns
 // These automatically create appropriate ingress and egress rules in both security groups
 fleet1.connections.allowTo(fleet2, ec2.Port.tcp(80), 'Allow between fleets');
 
-fleet.connections.allowFromAnyIpv4(ec2.Port.tcp(80), 'Allow from load balancer');
+appFleet.connections.allowFromAnyIpv4(ec2.Port.tcp(80), 'Allow from load balancer');
 ```
 
 ### Port Ranges
@@ -342,12 +343,12 @@ If the object you're calling the peering method on has a default port associated
 
 For example:
 
-```ts
+```ts fixture=conns
 // Port implicit in listener
 listener.connections.allowDefaultPortFromAnyIpv4('Allow public');
 
 // Port implicit in peer
-fleet.connections.allowDefaultPortTo(rdsDatabase, 'Fleet can access database');
+appFleet.connections.allowDefaultPortTo(rdsDatabase, 'Fleet can access database');
 ```
 
 ## Machine Images (AMIs)
@@ -374,7 +375,7 @@ examples of things you might want to use:
 
 Create your VPC with VPN connections by specifying the `vpnConnections` props (keys are construct `id`s):
 
 ```ts
-const vpc = new ec2.Vpc(stack, 'MyVpc', {
+const vpc = new ec2.Vpc(this, 'MyVpc', {
   vpnConnections: {
     dynamic: { // Dynamic routing (BGP)
       ip: '1.2.3.4'
@@ -393,13 +394,13 @@ const vpc = new ec2.Vpc(stack, 'MyVpc', {
 To create a VPC that can accept VPN connections, set `vpnGateway` to `true`:
 
 ```ts
-const vpc = new ec2.Vpc(stack, 'MyVpc', {
+const vpc = new ec2.Vpc(this, 'MyVpc', {
   vpnGateway: true
 });
 ```
 
 VPN connections can then be added:
-```ts
+```ts fixture=with-vpc
 vpc.addVpnConnection('Dynamic', {
   ip: '1.2.3.4'
 });
@@ -408,9 +409,10 @@ vpc.addVpnConnection('Dynamic', {
 Routes will be propagated on the route tables associated with the private subnets.
 
 VPN connections expose [metrics (cloudwatch.Metric)](https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk/aws-cloudwatch/README.md) across all tunnels in the account/region and per connection:
-```ts
+
+```ts fixture=with-vpc
 // Across all tunnels in the account/region
-const allDataOut = VpnConnection.metricAllTunnelDataOut();
+const allDataOut = ec2.VpnConnection.metricAllTunnelDataOut();
 
 // For a specific vpn connection
 const vpnConnection = vpc.addVpnConnection('Dynamic', {
@@ -431,8 +433,9 @@ By default, interface VPC endpoints create a new security group and traffic is *
 automatically allowed from the VPC CIDR.
 
 Use the `connections` object to allow traffic to flow to the endpoint:
-```ts
-myEndpoint.connections.allowDefaultPortFrom(...);
+
+```ts fixture=conns
+myEndpoint.connections.allowDefaultPortFromAnyIpv4();
 ```
 
 Alternatively, existing security groups can be used by specifying the `securityGroups` prop.
@@ -443,17 +446,17 @@ You can use bastion hosts using a standard SSH connection targetting port 22 on
 feature of AWS Systems Manager Session Manager, which does not need an opened security group. (https://aws.amazon.com/about-aws/whats-new/2019/07/session-manager-launches-tunneling-support-for-ssh-and-scp/)
 
 A default bastion host for use via SSM can be configured like:
-```ts
+```ts fixture=with-vpc
 const host = new ec2.BastionHostLinux(this, 'BastionHost', { vpc });
 ```
 
 If you want to connect from the internet using SSH, you need to place the host into a public subnet. You can then configure allowed source hosts.
-```ts
+```ts fixture=with-vpc
 const host = new ec2.BastionHostLinux(this, 'BastionHost', {
   vpc,
-  subnetSelection: { subnetType: SubnetType.PUBLIC },
+  subnetSelection: { subnetType: ec2.SubnetType.PUBLIC },
 });
-host.allowSshAccessFrom(Peer.ipv4('1.2.3.4/32'));
+host.allowSshAccessFrom(ec2.Peer.ipv4('1.2.3.4/32'));
 ```
 
 As there are no SSH public keys deployed on this machine, you need to use [EC2 Instance Connect](https://aws.amazon.com/de/blogs/compute/new-using-amazon-ec2-instance-connect-for-ssh-access-to-your-ec2-instances/)
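To make the `fixture=...` annotations above concrete: conceptually, jsii-rosetta drops each tagged snippet into its fixture at the `/// here` marker and compiles the combined file. For the VPN snippet above and the `with-vpc` fixture added later in this diff, the assembled unit would look roughly like the following sketch (the exact way the tool stitches the pieces together is an assumption; only the resulting shape is shown):

```ts
// Rough sketch of the compilation unit for a snippet tagged `fixture=with-vpc`
import { Construct, Stack } from '@aws-cdk/core';
import ec2 = require('@aws-cdk/aws-ec2');

class Fixture extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    const vpc = new ec2.Vpc(this, 'VPC');

    // README snippet inserted in place of the fixture's `/// here` marker
    vpc.addVpnConnection('Dynamic', {
      ip: '1.2.3.4'
    });
  }
}
```

This is also why the snippets above switch from `stack` to `this` and to `ec2.`-qualified names: inside the fixture they live in a `Stack` subclass where only the whole-package imports are in scope.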
diff --git a/packages/@aws-cdk/aws-ec2/lib/instance.ts b/packages/@aws-cdk/aws-ec2/lib/instance.ts
index 687d099a1100d..762021085e820 100644
--- a/packages/@aws-cdk/aws-ec2/lib/instance.ts
+++ b/packages/@aws-cdk/aws-ec2/lib/instance.ts
@@ -143,10 +143,9 @@ export interface InstanceProps {
    * The role must be assumable by the service principal `ec2.amazonaws.com`:
    *
    * @example
-   *
-   * const role = new iam.Role(this, 'MyRole', {
-   *   assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com')
-   * });
+   * const role = new iam.Role(this, 'MyRole', {
+   *   assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com')
+   * });
    *
    * @default - A role will automatically be created, it can be accessed via the `role` property
    */
diff --git a/packages/@aws-cdk/aws-ec2/lib/nat.ts b/packages/@aws-cdk/aws-ec2/lib/nat.ts
index fc0a4fa358bc1..e7c9f2759373a 100644
--- a/packages/@aws-cdk/aws-ec2/lib/nat.ts
+++ b/packages/@aws-cdk/aws-ec2/lib/nat.ts
@@ -85,9 +85,9 @@ export interface NatInstanceProps {
    * If you have a specific AMI ID you want to use, pass a `GenericLinuxImage`. For example:
    *
    * ```ts
-   * NatProvider.instance({
-   *   instanceType: new InstanceType('t3.micro'),
-   *   machineImage: new GenericLinuxImage({
+   * ec2.NatProvider.instance({
+   *   instanceType: new ec2.InstanceType('t3.micro'),
+   *   machineImage: new ec2.GenericLinuxImage({
    *     'us-east-2': 'ami-0f9c61b5a562a16af'
    *   })
    * })
    * ```
@@ -215,4 +215,4 @@ export class NatInstanceImage extends LookupMachineImage {
       owners: ['amazon'],
     });
   }
-}
\ No newline at end of file
+}
diff --git a/packages/@aws-cdk/aws-ec2/lib/vpc.ts b/packages/@aws-cdk/aws-ec2/lib/vpc.ts
index a2d508b444183..570c7b8d2feba 100644
--- a/packages/@aws-cdk/aws-ec2/lib/vpc.ts
+++ b/packages/@aws-cdk/aws-ec2/lib/vpc.ts
@@ -655,23 +655,25 @@ export interface VpcProps {
    * subnet in each AZ provide the following:
    *
    * ```ts
-   * subnetConfiguration: [
-   *   {
-   *     cidrMask: 24,
-   *     name: 'ingress',
-   *     subnetType: SubnetType.PUBLIC,
-   *   },
-   *   {
-   *     cidrMask: 24,
-   *     name: 'application',
-   *     subnetType: SubnetType.PRIVATE,
-   *   },
-   *   {
-   *     cidrMask: 28,
-   *     name: 'rds',
-   *     subnetType: SubnetType.ISOLATED,
-   *   }
-   * ]
+   * new ec2.Vpc(this, 'VPC', {
+   *   subnetConfiguration: [
+   *     {
+   *       cidrMask: 24,
+   *       name: 'ingress',
+   *       subnetType: ec2.SubnetType.PUBLIC,
+   *     },
+   *     {
+   *       cidrMask: 24,
+   *       name: 'application',
+   *       subnetType: ec2.SubnetType.PRIVATE,
+   *     },
+   *     {
+   *       cidrMask: 28,
+   *       name: 'rds',
+   *       subnetType: ec2.SubnetType.ISOLATED,
+   *     }
+   *   ]
+   * });
    * ```
    *
    * @default - The VPC CIDR will be evenly divided between 1 public and 1
@@ -682,7 +684,7 @@ export interface VpcProps {
   /**
    * Indicates whether a VPN gateway should be created and attached to this VPC.
    *
-   * @default - true when vpnGatewayAsn or vpnConnections is specified.
+   * @default - true when vpnGatewayAsn or vpnConnections is specified
    */
   readonly vpnGateway?: boolean;
 
@@ -784,15 +786,13 @@ export interface SubnetConfiguration {
  * For example:
  *
  * ```ts
- * import { SubnetType, Vpc } from '@aws-cdk/aws-ec2'
- *
- * const vpc = new Vpc(this, 'TheVPC', {
+ * const vpc = new ec2.Vpc(this, 'TheVPC', {
  *   cidr: "10.0.0.0/16"
  * })
 *
 * // Iterate the private subnets
 * const selection = vpc.selectSubnets({
- *   subnetType: SubnetType.PRIVATE
+ *   subnetType: ec2.SubnetType.PRIVATE
 * });
 *
 * for (const subnet of selection.subnets) {
diff --git a/packages/@aws-cdk/aws-ec2/rosetta/conns.ts-fixture b/packages/@aws-cdk/aws-ec2/rosetta/conns.ts-fixture
new file mode 100644
index 0000000000000..f29d9a1816a6e
--- /dev/null
+++ b/packages/@aws-cdk/aws-ec2/rosetta/conns.ts-fixture
@@ -0,0 +1,26 @@
+// Fixture with fake connectables
+import { Construct, Stack } from '@aws-cdk/core';
+import ec2 = require('@aws-cdk/aws-ec2');
+
+class Fixture extends Stack {
+  constructor(scope: Construct, id: string) {
+    super(scope, id);
+
+    const vpc = new ec2.Vpc(this, 'VPC');
+
+    const loadBalancer = new FakeConnectable();
+    const appFleet = new FakeConnectable();
+    const dbFleet = new FakeConnectable();
+    const rdsDatabase = new FakeConnectable();
+    const fleet1 = new FakeConnectable();
+    const fleet2 = new FakeConnectable();
+    const listener = new FakeConnectable();
+    const myEndpoint = new FakeConnectable();
+
+    /// here
+  }
+}
+
+class FakeConnectable implements ec2.IConnectable {
+  public readonly connections = new ec2.Connections({ securityGroups: [] });
+}
diff --git a/packages/@aws-cdk/aws-ec2/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-ec2/rosetta/default.ts-fixture
new file mode 100644
index 0000000000000..d4ecd7e92a6f1
--- /dev/null
+++ b/packages/@aws-cdk/aws-ec2/rosetta/default.ts-fixture
@@ -0,0 +1,12 @@
+// Fixture with packages imported, but nothing else
+import { Construct, Stack } from '@aws-cdk/core';
+import ec2 = require('@aws-cdk/aws-ec2');
+import iam = require('@aws-cdk/aws-iam');
+
+class Fixture extends Stack {
+  constructor(scope: Construct, id: string) {
+    super(scope, id);
+
+    /// here
+  }
+}
diff --git a/packages/@aws-cdk/aws-ec2/rosetta/with-vpc.ts-fixture b/packages/@aws-cdk/aws-ec2/rosetta/with-vpc.ts-fixture
new file mode 100644
index 0000000000000..dd8e539f8cf9f
--- /dev/null
+++ b/packages/@aws-cdk/aws-ec2/rosetta/with-vpc.ts-fixture
@@ -0,0 +1,13 @@
+// Fixture with packages imported and a VPC created
+import { Construct, Stack } from '@aws-cdk/core';
+import ec2 = require('@aws-cdk/aws-ec2');
+
+class Fixture extends Stack {
+  constructor(scope: Construct, id: string) {
+    super(scope, id);
+
+    const vpc = new ec2.Vpc(this, 'VPC');
+
+    /// here
+  }
+}
diff --git a/scripts/compile-samples b/scripts/compile-samples
new file mode 100755
index 0000000000000..a253f3ffaa21b
--- /dev/null
+++ b/scripts/compile-samples
@@ -0,0 +1,34 @@
+#!/bin/bash
+# A wee little script to compile samples in a package directory.
+#
+# Normally, samples are only compiled when doing 'pack.sh'; if you are iterating
+# on them you can use this script for quicker feedback.
+#
+# This could maybe have been an 'npm run' script, but it's not self-contained
+# (needs the "decdk" package to compile against and runs jsii-rosetta from the
+# repo root) so that didn't feel right. For now this is what we have.
+set -eu
+scriptdir=$(cd $(dirname $0) && pwd)
+
+dirs="${@:-$PWD}"
+
+for dir in $dirs; do
+  (
+    cd $dir
+    if [[ ! -f package.json ]]; then
+      echo "Run this against a directory with package.json in it (got: $PWD)" >&2
+      exit 1
+    fi
+
+    # Run jsii
+    npm run build
+
+    # Run rosetta against decdk dir, failing on compilation errors
+    node --experimental-worker $scriptdir/../node_modules/.bin/jsii-rosetta \
+      --directory $scriptdir/../packages/decdk \
+      --compile \
+      --output /dev/null \
+      --verbose \
+      --fail
+  )
+done
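Doc-comment examples go through the same machinery: an `@example` with no `/// fixture=` line is compiled against `rosetta/default.ts-fixture`, which is why that fixture imports `iam` even though it lives in the `aws-ec2` package. As a rough sketch (the assembly shown here is an assumption, mirroring the fixtures above), the `@example` from `instance.ts` would effectively be compiled as:

```ts
// Sketch: the InstanceProps.role example body dropped into rosetta/default.ts-fixture
import { Construct, Stack } from '@aws-cdk/core';
import ec2 = require('@aws-cdk/aws-ec2');
import iam = require('@aws-cdk/aws-iam');

class Fixture extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Example body inserted in place of `/// here`
    const role = new iam.Role(this, 'MyRole', {
      assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com')
    });
  }
}
```

The same requirement that everything resolve inside a `Stack` subclass with only package-level imports in scope is what motivates the `ec2.`-qualified names in the `nat.ts` and `vpc.ts` doc comments above.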