diff --git a/builtin/providers/aws/data_source_aws_availability_zone.go b/builtin/providers/aws/data_source_aws_availability_zone.go new file mode 100644 index 000000000000..edab7c92615d --- /dev/null +++ b/builtin/providers/aws/data_source_aws_availability_zone.go @@ -0,0 +1,89 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsAvailabilityZone() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsAvailabilityZoneRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name_suffix": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeAvailabilityZonesInput{} + + if name := d.Get("name"); name != "" { + req.ZoneNames = []*string{aws.String(name.(string))} + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "state": d.Get("state").(string), + }, + ) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] DescribeAvailabilityZones %s\n", req) + resp, err := conn.DescribeAvailabilityZones(req) + if err != nil { + return err + } + if resp == nil || len(resp.AvailabilityZones) == 0 { + return fmt.Errorf("no matching AZ found") + } + if len(resp.AvailabilityZones) > 1 { + return fmt.Errorf("multiple AZs matched; use additional constraints to reduce matches to a single AZ") + } + + az := resp.AvailabilityZones[0] + + // As a convenience when working with AZs generically, we expose + // the AZ suffix alone, without the region name. + // This can be used e.g. to create lookup tables by AZ letter that + // work regardless of region. 
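+ // For example, the zone name "us-west-2a" in region "us-west-2" yields the suffix "a".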
+ nameSuffix := (*az.ZoneName)[len(*az.RegionName):] + + d.SetId(*az.ZoneName) + d.Set("id", az.ZoneName) + d.Set("name", az.ZoneName) + d.Set("name_suffix", nameSuffix) + d.Set("region", az.RegionName) + d.Set("state", az.State) + + return nil +} diff --git a/builtin/providers/aws/data_source_aws_availability_zone_test.go b/builtin/providers/aws/data_source_aws_availability_zone_test.go new file mode 100644 index 000000000000..8808011dbc16 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_availability_zone_test.go @@ -0,0 +1,57 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceAwsAvailabilityZone(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDataSourceAwsAvailabilityZoneConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsAvailabilityZoneCheck("data.aws_availability_zone.by_name"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsAvailabilityZoneCheck(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("root module has no resource called %s", name) + } + + attr := rs.Primary.Attributes + + if attr["name"] != "us-west-2a" { + return fmt.Errorf("bad name %s", attr["name"]) + } + if attr["name_suffix"] != "a" { + return fmt.Errorf("bad name_suffix %s", attr["name_suffix"]) + } + if attr["region"] != "us-west-2" { + return fmt.Errorf("bad region %s", attr["region"]) + } + + return nil + } +} + +const testAccDataSourceAwsAvailabilityZoneConfig = ` +provider "aws" { + region = "us-west-2" +} + +data "aws_availability_zone" "by_name" { + name = "us-west-2a" +} +` diff --git a/builtin/providers/aws/data_source_aws_region.go b/builtin/providers/aws/data_source_aws_region.go new file mode 100644 index 000000000000..ed75f7056100 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_region.go @@ -0,0 +1,84 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsRegion() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsRegionRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "current": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + currentRegion := meta.(*AWSClient).region + + req := &ec2.DescribeRegionsInput{} + + req.RegionNames = make([]*string, 0, 2) + if name := d.Get("name").(string); name != "" { + req.RegionNames = append(req.RegionNames, aws.String(name)) + } + + if d.Get("current").(bool) { + req.RegionNames = append(req.RegionNames, aws.String(currentRegion)) + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "endpoint": d.Get("endpoint").(string), + }, + ) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. 
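+ // (The list is empty here whenever "endpoint" was left unset, since buildEC2AttributeFilterList skips empty values.)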
+ req.Filters = nil + } + + log.Printf("[DEBUG] DescribeRegions %s\n", req) + resp, err := conn.DescribeRegions(req) + if err != nil { + return err + } + if resp == nil || len(resp.Regions) == 0 { + return fmt.Errorf("no matching regions found") + } + if len(resp.Regions) > 1 { + return fmt.Errorf("multiple regions matched; use additional constraints to reduce matches to a single region") + } + + region := resp.Regions[0] + + d.SetId(*region.RegionName) + d.Set("id", region.RegionName) + d.Set("name", region.RegionName) + d.Set("endpoint", region.Endpoint) + d.Set("current", *region.RegionName == currentRegion) + + return nil +} diff --git a/builtin/providers/aws/data_source_aws_region_test.go b/builtin/providers/aws/data_source_aws_region_test.go new file mode 100644 index 000000000000..370c4b2b0004 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_region_test.go @@ -0,0 +1,64 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceAwsRegion(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDataSourceAwsRegionConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsRegionCheck("data.aws_region.by_name_current", "us-west-2", "true"), + testAccDataSourceAwsRegionCheck("data.aws_region.by_name_other", "us-west-1", "false"), + testAccDataSourceAwsRegionCheck("data.aws_region.by_current", "us-west-2", "true"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsRegionCheck(name, region, current string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("root module has no resource called %s", name) + } + + attr := rs.Primary.Attributes + + if attr["name"] != region { + return fmt.Errorf("bad name %s", attr["name"]) + } + if attr["current"] != current { + return fmt.Errorf("bad current %s; want %s", attr["current"], current) + } + + return nil + } +} + +const testAccDataSourceAwsRegionConfig = ` +provider "aws" { + region = "us-west-2" +} + +data "aws_region" "by_name_current" { + name = "us-west-2" +} + +data "aws_region" "by_name_other" { + name = "us-west-1" +} + +data "aws_region" "by_current" { + current = true +} +` diff --git a/builtin/providers/aws/data_source_aws_subnet.go b/builtin/providers/aws/data_source_aws_subnet.go new file mode 100644 index 000000000000..ddd178a30bde --- /dev/null +++ b/builtin/providers/aws/data_source_aws_subnet.go @@ -0,0 +1,123 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsSubnet() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsSubnetRead, + + Schema: map[string]*schema.Schema{ + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "cidr_block": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default_for_az": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "filter": ec2CustomFiltersSchema(), + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "tags": 
tagsSchemaComputed(), + + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeSubnetsInput{} + + if id := d.Get("id"); id != "" { + req.SubnetIds = []*string{aws.String(id.(string))} + } + + // We specify default_for_az as boolean, but EC2 filters want + // it to be serialized as a string. Note that setting it to + // "false" here does not actually filter by it *not* being + // the default, because Terraform can't distinguish between + // "false" and "not set". + defaultForAzStr := "" + if d.Get("default_for_az").(bool) { + defaultForAzStr = "true" + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "availabilityZone": d.Get("availability_zone").(string), + "cidrBlock": d.Get("cidr_block").(string), + "defaultForAz": defaultForAzStr, + "state": d.Get("state").(string), + "vpc-id": d.Get("vpc_id").(string), + }, + ) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] DescribeSubnets %s\n", req) + resp, err := conn.DescribeSubnets(req) + if err != nil { + return err + } + if resp == nil || len(resp.Subnets) == 0 { + return fmt.Errorf("no matching subnet found") + } + if len(resp.Subnets) > 1 { + return fmt.Errorf("multiple subnets matched; use additional constraints to reduce matches to a single subnet") + } + + subnet := resp.Subnets[0] + + d.SetId(*subnet.SubnetId) + d.Set("id", subnet.SubnetId) + d.Set("vpc_id", subnet.VpcId) + d.Set("availability_zone", subnet.AvailabilityZone) + d.Set("cidr_block", subnet.CidrBlock) + d.Set("default_for_az", subnet.DefaultForAz) + d.Set("state", subnet.State) + d.Set("tags", tagsToMap(subnet.Tags)) + + return nil +} diff --git a/builtin/providers/aws/data_source_aws_subnet_test.go b/builtin/providers/aws/data_source_aws_subnet_test.go new file mode 100644 index 000000000000..51a76465737d --- /dev/null +++ b/builtin/providers/aws/data_source_aws_subnet_test.go @@ -0,0 +1,125 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceAwsSubnet(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDataSourceAwsSubnetConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_id"), + testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_cidr"), + testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_tag"), + testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_vpc"), + testAccDataSourceAwsSubnetCheck("data.aws_subnet.by_filter"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsSubnetCheck(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("root module has no resource called %s", name) + } + + vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] + if !ok { + return fmt.Errorf("can't find aws_vpc.test in state") + } + subnetRs, ok := 
s.RootModule().Resources["aws_subnet.test"] + if !ok { + return fmt.Errorf("can't find aws_subnet.test in state") + } + + attr := rs.Primary.Attributes + + if attr["id"] != subnetRs.Primary.Attributes["id"] { + return fmt.Errorf( + "id is %s; want %s", + attr["id"], + subnetRs.Primary.Attributes["id"], + ) + } + + if attr["vpc_id"] != vpcRs.Primary.Attributes["id"] { + return fmt.Errorf( + "vpc_id is %s; want %s", + attr["vpc_id"], + vpcRs.Primary.Attributes["id"], + ) + } + + if attr["cidr_block"] != "172.16.123.0/24" { + return fmt.Errorf("bad cidr_block %s", attr["cidr_block"]) + } + if attr["availability_zone"] != "us-west-2a" { + return fmt.Errorf("bad availability_zone %s", attr["availability_zone"]) + } + if attr["tags.Name"] != "terraform-testacc-subnet-data-source" { + return fmt.Errorf("bad Name tag %s", attr["tags.Name"]) + } + + return nil + } +} + +const testAccDataSourceAwsSubnetConfig = ` +provider "aws" { + region = "us-west-2" +} + +resource "aws_vpc" "test" { + cidr_block = "172.16.0.0/16" + + tags { + Name = "terraform-testacc-subnet-data-source" + } +} + +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "172.16.123.0/24" + availability_zone = "us-west-2a" + + tags { + Name = "terraform-testacc-subnet-data-source" + } +} + +data "aws_subnet" "by_id" { + id = "${aws_subnet.test.id}" +} + +data "aws_subnet" "by_cidr" { + cidr_block = "${aws_subnet.test.cidr_block}" +} + +data "aws_subnet" "by_tag" { + tags { + Name = "${aws_subnet.test.tags["Name"]}" + } +} + +data "aws_subnet" "by_vpc" { + vpc_id = "${aws_subnet.test.vpc_id}" +} + +data "aws_subnet" "by_filter" { + filter { + name = "vpc-id" + values = ["${aws_subnet.test.vpc_id}"] + } +} +` diff --git a/builtin/providers/aws/data_source_aws_vpc.go b/builtin/providers/aws/data_source_aws_vpc.go new file mode 100644 index 000000000000..ffcad8cc6177 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_vpc.go @@ -0,0 +1,121 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsVpc() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsVpcRead, + + Schema: map[string]*schema.Schema{ + "cidr_block": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "dhcp_options_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "filter": ec2CustomFiltersSchema(), + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "instance_tenancy": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "tags": tagsSchemaComputed(), + }, + } +} + +func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := &ec2.DescribeVpcsInput{} + + if id := d.Get("id"); id != "" { + req.VpcIds = []*string{aws.String(id.(string))} + } + + // We specify "default" as boolean, but EC2 filters want + // it to be serialized as a string. Note that setting it to + // "false" here does not actually filter by it *not* being + // the default, because Terraform can't distinguish between + // "false" and "not set". 
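+ // For example, default = true is sent to the EC2 API as the string "true" for the "isDefault" filter, while false (or unset) sends no "isDefault" filter at all.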
+ isDefaultStr := "" + if d.Get("default").(bool) { + isDefaultStr = "true" + } + + req.Filters = buildEC2AttributeFilterList( + map[string]string{ + "cidr": d.Get("cidr_block").(string), + "dhcp-options-id": d.Get("dhcp_options_id").(string), + "isDefault": isDefaultStr, + "state": d.Get("state").(string), + }, + ) + req.Filters = append(req.Filters, buildEC2TagFilterList( + tagsFromMap(d.Get("tags").(map[string]interface{})), + )...) + req.Filters = append(req.Filters, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filters = nil + } + + log.Printf("[DEBUG] DescribeVpcs %s\n", req) + resp, err := conn.DescribeVpcs(req) + if err != nil { + return err + } + if resp == nil || len(resp.Vpcs) == 0 { + return fmt.Errorf("no matching VPC found") + } + if len(resp.Vpcs) > 1 { + return fmt.Errorf("multiple VPCs matched; use additional constraints to reduce matches to a single VPC") + } + + vpc := resp.Vpcs[0] + + d.SetId(*vpc.VpcId) + d.Set("id", vpc.VpcId) + d.Set("cidr_block", vpc.CidrBlock) + d.Set("dhcp_options_id", vpc.DhcpOptionsId) + d.Set("instance_tenancy", vpc.InstanceTenancy) + d.Set("default", vpc.IsDefault) + d.Set("state", vpc.State) + d.Set("tags", tagsToMap(vpc.Tags)) + + return nil +} diff --git a/builtin/providers/aws/data_source_aws_vpc_test.go b/builtin/providers/aws/data_source_aws_vpc_test.go new file mode 100644 index 000000000000..5b0916f45e31 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_vpc_test.go @@ -0,0 +1,95 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceAwsVpc(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDataSourceAwsVpcConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsVpcCheck("data.aws_vpc.by_id"), + testAccDataSourceAwsVpcCheck("data.aws_vpc.by_cidr"), + testAccDataSourceAwsVpcCheck("data.aws_vpc.by_tag"), + testAccDataSourceAwsVpcCheck("data.aws_vpc.by_filter"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsVpcCheck(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("root module has no resource called %s", name) + } + + vpcRs, ok := s.RootModule().Resources["aws_vpc.test"] + if !ok { + return fmt.Errorf("can't find aws_vpc.test in state") + } + + attr := rs.Primary.Attributes + + if attr["id"] != vpcRs.Primary.Attributes["id"] { + return fmt.Errorf( + "id is %s; want %s", + attr["id"], + vpcRs.Primary.Attributes["id"], + ) + } + + if attr["cidr_block"] != "172.16.0.0/16" { + return fmt.Errorf("bad cidr_block %s", attr["cidr_block"]) + } + if attr["tags.Name"] != "terraform-testacc-vpc-data-source" { + return fmt.Errorf("bad Name tag %s", attr["tags.Name"]) + } + + return nil + } +} + +const testAccDataSourceAwsVpcConfig = ` +provider "aws" { + region = "us-west-2" +} + +resource "aws_vpc" "test" { + cidr_block = "172.16.0.0/16" + + tags { + Name = "terraform-testacc-vpc-data-source" + } +} + +data "aws_vpc" "by_id" { + id = "${aws_vpc.test.id}" +} + +data "aws_vpc" "by_cidr" { + cidr_block = "${aws_vpc.test.cidr_block}" +} + +data "aws_vpc" "by_tag" { + tags { + Name = "${aws_vpc.test.tags["Name"]}" + } +} + +data "aws_vpc" 
"by_filter" { + filter { + name = "cidr" + values = ["${aws_vpc.test.cidr_block}"] + } +} +` diff --git a/builtin/providers/aws/ec2_filters.go b/builtin/providers/aws/ec2_filters.go new file mode 100644 index 000000000000..249bbf653e50 --- /dev/null +++ b/builtin/providers/aws/ec2_filters.go @@ -0,0 +1,153 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/schema" +) + +// buildEC2AttributeFilterList takes a flat map of scalar attributes (most +// likely values extracted from a *schema.ResourceData on an EC2-querying +// data source) and produces a []*ec2.Filter representing an exact match +// for each of the given non-empty attributes. +// +// The keys of the given attributes map are the attribute names expected +// by the EC2 API, which are usually either in camelcase or with dash-separated +// words. We conventionally map these to underscore-separated identifiers +// with the same words when presenting these as data source query attributes +// in Terraform. +// +// It's the callers responsibility to transform any non-string values into +// the appropriate string serialization required by the AWS API when +// encoding the given filter. Any attributes given with empty string values +// are ignored, assuming that the user wishes to leave that attribute +// unconstrained while filtering. +// +// The purpose of this function is to create values to pass in +// for the "Filters" attribute on most of the "Describe..." API functions in +// the EC2 API, to aid in the implementation of Terraform data sources that +// retrieve data about EC2 objects. +func buildEC2AttributeFilterList(attrs map[string]string) []*ec2.Filter { + filters := make([]*ec2.Filter, 0, len(attrs)) + + for filterName, value := range attrs { + if value == "" { + continue + } + + filters = append(filters, &ec2.Filter{ + Name: aws.String(filterName), + Values: []*string{aws.String(value)}, + }) + } + + return filters +} + +// buildEC2TagFilterList takes a []*ec2.Tag and produces a []*ec2.Filter that +// represents exact matches for all of the tag key/value pairs given in +// the tag set. +// +// The purpose of this function is to create values to pass in for +// the "Filters" attribute on most of the "Describe..." API functions +// in the EC2 API, to implement filtering by tag values e.g. in Terraform +// data sources that retrieve data about EC2 objects. +// +// It is conventional for an EC2 data source to include an attribute called +// "tags" which conforms to the schema returned by the tagsSchema() function. +// The value of this can then be converted to a tags slice using tagsFromMap, +// and the result finally passed in to this function. +// +// In Terraform configuration this would then look like this, to constrain +// results by name: +// +// tags { +// Name = "my-awesome-subnet" +// } +func buildEC2TagFilterList(tags []*ec2.Tag) []*ec2.Filter { + filters := make([]*ec2.Filter, len(tags)) + + for i, tag := range tags { + filters[i] = &ec2.Filter{ + Name: aws.String(fmt.Sprintf("tag:%s", *tag.Key)), + Values: []*string{tag.Value}, + } + } + + return filters +} + +// ec2CustomFiltersSchema returns a *schema.Schema that represents +// a set of custom filtering criteria that a user can specify as input +// to a data source that wraps one of the many "Describe..." API calls +// in the EC2 API. +// +// It is conventional for an attribute of this type to be included +// as a top-level attribute called "filter". 
This is the "catch all" for +// filter combinations that are not possible to express using scalar +// attributes or tags. In Terraform configuration, the custom filter blocks +// then look like this: +// +// filter { +// name = "availabilityZone" +// values = ["us-west-2a", "us-west-2b"] +// } +func ec2CustomFiltersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "values": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + } +} + +// buildEC2CustomFilterList takes the set value extracted from a schema +// attribute conforming to the schema returned by ec2CustomFiltersSchema, +// and transforms it into a []*ec2.Filter representing the same filter +// expressions which is ready to pass into the "Filters" attribute on most +// of the "Describe..." functions in the EC2 API. +// +// This function is intended only to be used in conjunction with +// ec2CustomFitlersSchema. See the docs on that function for more details +// on the configuration pattern this is intended to support. +func buildEC2CustomFilterList(filterSet *schema.Set) []*ec2.Filter { + if filterSet == nil { + return []*ec2.Filter{} + } + + customFilters := filterSet.List() + filters := make([]*ec2.Filter, len(customFilters)) + + for filterIdx, customFilterI := range customFilters { + customFilterMapI := customFilterI.(map[string]interface{}) + name := customFilterMapI["name"].(string) + valuesI := customFilterMapI["values"].(*schema.Set).List() + values := make([]*string, len(valuesI)) + for valueIdx, valueI := range valuesI { + values[valueIdx] = aws.String(valueI.(string)) + } + + filters[filterIdx] = &ec2.Filter{ + Name: &name, + Values: values, + } + } + + return filters +} diff --git a/builtin/providers/aws/ec2_filters_test.go b/builtin/providers/aws/ec2_filters_test.go new file mode 100644 index 000000000000..d0d13b5596d5 --- /dev/null +++ b/builtin/providers/aws/ec2_filters_test.go @@ -0,0 +1,158 @@ +package aws + +import ( + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/hashicorp/terraform/helper/schema" +) + +func TestBuildEC2AttributeFilterList(t *testing.T) { + type TestCase struct { + Attrs map[string]string + Expected []*ec2.Filter + } + testCases := []TestCase{ + { + map[string]string{ + "foo": "bar", + "baz": "boo", + }, + []*ec2.Filter{ + { + Name: aws.String("foo"), + Values: []*string{aws.String("bar")}, + }, + { + Name: aws.String("baz"), + Values: []*string{aws.String("boo")}, + }, + }, + }, + { + map[string]string{ + "foo": "bar", + "baz": "", + }, + []*ec2.Filter{ + { + Name: aws.String("foo"), + Values: []*string{aws.String("bar")}, + }, + }, + }, + } + + for i, testCase := range testCases { + result := buildEC2AttributeFilterList(testCase.Attrs) + + if !reflect.DeepEqual(result, testCase.Expected) { + t.Errorf( + "test case %d: got %#v, but want %#v", + i, result, testCase.Expected, + ) + } + } +} + +func TestBuildEC2TagFilterList(t *testing.T) { + type TestCase struct { + Tags []*ec2.Tag + Expected []*ec2.Filter + } + testCases := []TestCase{ + { + []*ec2.Tag{ + { + Key: aws.String("foo"), + Value: aws.String("bar"), + }, + { + Key: aws.String("baz"), + Value: aws.String("boo"), + }, + }, + []*ec2.Filter{ + { + Name: aws.String("tag:foo"), + Values: []*string{aws.String("bar")}, 
+ }, + { + Name: aws.String("tag:baz"), + Values: []*string{aws.String("boo")}, + }, + }, + }, + } + + for i, testCase := range testCases { + result := buildEC2TagFilterList(testCase.Tags) + + if !reflect.DeepEqual(result, testCase.Expected) { + t.Errorf( + "test case %d: got %#v, but want %#v", + i, result, testCase.Expected, + ) + } + } +} + +func TestBuildEC2CustomFilterList(t *testing.T) { + + // We need to get a set with the appropriate hash function, + // so we'll use the schema to help us produce what would + // be produced in the normal case. + filtersSchema := ec2CustomFiltersSchema() + + // The zero value of this schema will be an interface{} + // referring to a new, empty *schema.Set with the + // appropriate hash function configured. + filters := filtersSchema.ZeroValue().(*schema.Set) + + // We also need an appropriately-configured set for + // the list of values. + valuesSchema := filtersSchema.Elem.(*schema.Resource).Schema["values"] + valuesSet := func(vals ...string) *schema.Set { + ret := valuesSchema.ZeroValue().(*schema.Set) + for _, val := range vals { + ret.Add(val) + } + return ret + } + + filters.Add(map[string]interface{}{ + "name": "foo", + "values": valuesSet("bar", "baz"), + }) + filters.Add(map[string]interface{}{ + "name": "pizza", + "values": valuesSet("cheese"), + }) + + expected := []*ec2.Filter{ + // These are produced in the deterministic order guaranteed + // by schema.Set.List(), which happens to produce them in + // the following order for our current input. If this test + // evolves with different input data in future then they + // will likely be emitted in a different order, which is fine. + { + Name: aws.String("pizza"), + Values: []*string{aws.String("cheese")}, + }, + { + Name: aws.String("foo"), + Values: []*string{aws.String("bar"), aws.String("baz")}, + }, + } + result := buildEC2CustomFilterList(filters) + + if !reflect.DeepEqual(result, expected) { + t.Errorf( + "got %#v, but want %#v", + result, expected, + ) + } +} diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index a9ecf320fde9..9067e75763da 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -144,6 +144,7 @@ func Provider() terraform.ResourceProvider { DataSourcesMap: map[string]*schema.Resource{ "aws_ami": dataSourceAwsAmi(), + "aws_availability_zone": dataSourceAwsAvailabilityZone(), "aws_availability_zones": dataSourceAwsAvailabilityZones(), "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), "aws_caller_identity": dataSourceAwsCallerIdentity(), @@ -153,7 +154,10 @@ func Provider() terraform.ResourceProvider { "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), "aws_ip_ranges": dataSourceAwsIPRanges(), "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), + "aws_region": dataSourceAwsRegion(), "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), + "aws_subnet": dataSourceAwsSubnet(), + "aws_vpc": dataSourceAwsVpc(), }, ResourcesMap: map[string]*schema.Resource{ diff --git a/builtin/providers/aws/tags.go b/builtin/providers/aws/tags.go index 59390d37ebad..7df71b5dc80d 100644 --- a/builtin/providers/aws/tags.go +++ b/builtin/providers/aws/tags.go @@ -22,6 +22,14 @@ func tagsSchema() *schema.Schema { } } +func tagsSchemaComputed() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Computed: true, + } +} + func setElbV2Tags(conn *elbv2.ELBV2, d *schema.ResourceData) error { if d.HasChange("tags") { oraw, nraw := d.GetChange("tags") diff 
--git a/examples/aws-networking/.gitignore b/examples/aws-networking/.gitignore new file mode 100644 index 000000000000..6382ccad7cc9 --- /dev/null +++ b/examples/aws-networking/.gitignore @@ -0,0 +1,3 @@ +terraform.tfstate +terraform.tfstate.backup +.terraform/* diff --git a/examples/aws-networking/README.md b/examples/aws-networking/README.md new file mode 100644 index 000000000000..f684e7c9b95f --- /dev/null +++ b/examples/aws-networking/README.md @@ -0,0 +1,11 @@ +# AWS Networking Example + +This example creates AWS VPC resources, making a VPC in each of two regions and +then two subnets in each VPC in two different availability zones. + +This example also demonstrates the use of modules to create several copies of +the same resource set with different arguments. The child modules in this +directory are: + +* `region`: container module for all of the network resources within a region. This is instantiated once per region. +* `subnet`: represents a subnet within a given availability zone. This is instantiated twice per region, using the first two availability zones supported within the target AWS account. diff --git a/examples/aws-networking/numbering/variables.tf b/examples/aws-networking/numbering/variables.tf new file mode 100644 index 000000000000..ae32caa89a9b --- /dev/null +++ b/examples/aws-networking/numbering/variables.tf @@ -0,0 +1,27 @@ +variable "region_numbers" { + default = { + us-east-1 = 1 + us-west-1 = 2 + us-west-2 = 3 + eu-west-1 = 4 + } +} + +variable "az_numbers" { + default = { + a = 1 + b = 2 + c = 3 + d = 4 + e = 5 + f = 6 + g = 7 + h = 8 + i = 9 + j = 10 + k = 11 + l = 12 + m = 13 + n = 14 + } +} diff --git a/examples/aws-networking/region/numbering.tf b/examples/aws-networking/region/numbering.tf new file mode 120000 index 000000000000..49f7617b054a --- /dev/null +++ b/examples/aws-networking/region/numbering.tf @@ -0,0 +1 @@ +../numbering/variables.tf \ No newline at end of file diff --git a/examples/aws-networking/region/outputs.tf b/examples/aws-networking/region/outputs.tf new file mode 100644 index 000000000000..fd2c64c3e917 --- /dev/null +++ b/examples/aws-networking/region/outputs.tf @@ -0,0 +1,11 @@ +output "vpc_id" { + value = "${aws_vpc.main.id}" +} + +output "primary_subnet_id" { + value = "${module.primary_subnet.subnet_id}" +} + +output "secondary_subnet_id" { + value = "${module.secondary_subnet.subnet_id}" +} diff --git a/examples/aws-networking/region/security_group.tf b/examples/aws-networking/region/security_group.tf new file mode 100644 index 000000000000..c5792dca55e1 --- /dev/null +++ b/examples/aws-networking/region/security_group.tf @@ -0,0 +1,25 @@ +resource "aws_security_group" "region" { + name = "region" + description = "Open access within this region" + vpc_id = "${aws_vpc.main.id}" + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = ["${aws_vpc.main.cidr_block}"] + } +} + +resource "aws_security_group" "internal-all" { + name = "internal-all" + description = "Open access within the full internal network" + vpc_id = "${aws_vpc.main.id}" + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = ["${var.base_cidr_block}"] + } +} diff --git a/examples/aws-networking/region/subnets.tf b/examples/aws-networking/region/subnets.tf new file mode 100644 index 000000000000..d51b10711174 --- /dev/null +++ b/examples/aws-networking/region/subnets.tf @@ -0,0 +1,14 @@ +data "aws_availability_zones" "all" { +} + +module "primary_subnet" { + source = "../subnet" + vpc_id = "${aws_vpc.main.id}" + 
availability_zone = "${data.aws_availability_zones.all.names[0]}" +} + +module "secondary_subnet" { + source = "../subnet" + vpc_id = "${aws_vpc.main.id}" + availability_zone = "${data.aws_availability_zones.all.names[1]}" +} diff --git a/examples/aws-networking/region/variables.tf b/examples/aws-networking/region/variables.tf new file mode 100644 index 000000000000..b5916f051cb2 --- /dev/null +++ b/examples/aws-networking/region/variables.tf @@ -0,0 +1,9 @@ +variable "region" { + description = "The name of the AWS region to set up a network within" +} + +variable "base_cidr_block" {} + +provider "aws" { + region = "${var.region}" +} diff --git a/examples/aws-networking/region/vpc.tf b/examples/aws-networking/region/vpc.tf new file mode 100644 index 000000000000..84a5e9114878 --- /dev/null +++ b/examples/aws-networking/region/vpc.tf @@ -0,0 +1,7 @@ +resource "aws_vpc" "main" { + cidr_block = "${cidrsubnet(var.base_cidr_block, 4, lookup(var.region_numbers, var.region))}" +} + +resource "aws_internet_gateway" "main" { + vpc_id = "${aws_vpc.main.id}" +} diff --git a/examples/aws-networking/regions.tf b/examples/aws-networking/regions.tf new file mode 100644 index 000000000000..2041bf70260a --- /dev/null +++ b/examples/aws-networking/regions.tf @@ -0,0 +1,11 @@ +module "us-east-1" { + source = "./region" + region = "us-east-1" + base_cidr_block = "${var.base_cidr_block}" +} + +module "us-west-2" { + source = "./region" + region = "us-west-2" + base_cidr_block = "${var.base_cidr_block}" +} diff --git a/examples/aws-networking/subnet/numbering.tf b/examples/aws-networking/subnet/numbering.tf new file mode 120000 index 000000000000..49f7617b054a --- /dev/null +++ b/examples/aws-networking/subnet/numbering.tf @@ -0,0 +1 @@ +../numbering/variables.tf \ No newline at end of file diff --git a/examples/aws-networking/subnet/outputs.tf b/examples/aws-networking/subnet/outputs.tf new file mode 100644 index 000000000000..e7ef1921b02d --- /dev/null +++ b/examples/aws-networking/subnet/outputs.tf @@ -0,0 +1,3 @@ +output "subnet_id" { + value = "${aws_subnet.main.id}" +} diff --git a/examples/aws-networking/subnet/security_group.tf b/examples/aws-networking/subnet/security_group.tf new file mode 100644 index 000000000000..5761ab56fcfc --- /dev/null +++ b/examples/aws-networking/subnet/security_group.tf @@ -0,0 +1,12 @@ +resource "aws_security_group" "az" { + name = "az-${data.aws_availability_zone.target.name}" + description = "Open access within the AZ ${data.aws_availability_zone.target.name}" + vpc_id = "${var.vpc_id}" + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = ["${aws_subnet.main.cidr_block}"] + } +} diff --git a/examples/aws-networking/subnet/subnet.tf b/examples/aws-networking/subnet/subnet.tf new file mode 100644 index 000000000000..8ad68da49c06 --- /dev/null +++ b/examples/aws-networking/subnet/subnet.tf @@ -0,0 +1,13 @@ +resource "aws_subnet" "main" { + cidr_block = "${cidrsubnet(data.aws_vpc.target.cidr_block, 4, lookup(var.az_numbers, data.aws_availability_zone.target.name_suffix))}" + vpc_id = "${var.vpc_id}" +} + +resource "aws_route_table" "main" { + vpc_id = "${var.vpc_id}" +} + +resource "aws_route_table_association" "main" { + subnet_id = "${aws_subnet.main.id}" + route_table_id = "${aws_route_table.main.id}" +} diff --git a/examples/aws-networking/subnet/variables.tf b/examples/aws-networking/subnet/variables.tf new file mode 100644 index 000000000000..638085268162 --- /dev/null +++ b/examples/aws-networking/subnet/variables.tf @@ -0,0 +1,11 @@ +variable 
"vpc_id" {} + +variable "availability_zone" {} + +data "aws_availability_zone" "target" { + name = "${var.availability_zone}" +} + +data "aws_vpc" "target" { + id = "${var.vpc_id}" +} diff --git a/examples/aws-networking/variables.tf b/examples/aws-networking/variables.tf new file mode 100644 index 000000000000..054a1fc09f42 --- /dev/null +++ b/examples/aws-networking/variables.tf @@ -0,0 +1,3 @@ +variable "base_cidr_block" { + default = "10.0.0.0/12" +} diff --git a/terraform/eval_read_data.go b/terraform/eval_read_data.go index aeb2ebaef330..fb85a284e8dc 100644 --- a/terraform/eval_read_data.go +++ b/terraform/eval_read_data.go @@ -47,14 +47,17 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { diff = new(InstanceDiff) } - // id is always computed, because we're always "creating a new resource" + // if id isn't explicitly set then it's always computed, because we're + // always "creating a new resource". diff.init() - diff.SetAttribute("id", &ResourceAttrDiff{ - Old: "", - NewComputed: true, - RequiresNew: true, - Type: DiffAttrOutput, - }) + if _, ok := diff.Attributes["id"]; !ok { + diff.SetAttribute("id", &ResourceAttrDiff{ + Old: "", + NewComputed: true, + RequiresNew: true, + Type: DiffAttrOutput, + }) + } } err = ctx.Hook(func(h Hook) (HookAction, error) { diff --git a/website/source/docs/providers/aws/d/availability_zone.html.markdown b/website/source/docs/providers/aws/d/availability_zone.html.markdown new file mode 100644 index 000000000000..35622c97fbb6 --- /dev/null +++ b/website/source/docs/providers/aws/d/availability_zone.html.markdown @@ -0,0 +1,98 @@ +--- +layout: "aws" +page_title: "AWS: aws_availability_zone" +sidebar_current: "docs-aws-datasource-availability-zone" +description: |- + Provides details about a specific availability zone +--- + +# aws\_availability\_zone + +`aws_availability_zone` provides details about a specific availablity zone (AZ) +in the current region. + +This can be used both to validate an availability zone given in a variable +and to split the AZ name into its component parts of an AWS region and an +AZ identifier letter. The latter may be useful e.g. for implementing a +consistent subnet numbering scheme across several regions by mapping both +the region and the subnet letter to network numbers. + +This is different from the `aws_availability_zones` (plural) data source, +which provides a list of the available zones. + +## Example Usage + +The following example shows how this data source might be used to derive +VPC and subnet CIDR prefixes systematically for an availability zone. + +``` +variable "region_number" { + # Arbitrary mapping of region name to number to use in + # a VPC's CIDR prefix. + default = { + us-east-1 = 1 + us-west-1 = 2 + us-west-2 = 3 + eu-central-1 = 4 + ap-northeast-1 = 5 + } +} + +variable "az_number" { + # Assign a number to each AZ letter used in our configuration + default = { + a = 1 + b = 2 + c = 3 + d = 4 + e = 5 + f = 6 + } +} + +# Retrieve the AZ where we want to create network resources +# This must be in the region selected on the AWS provider. 
+data "aws_availability_zone" "example" { + name = "eu-central-1a" +} + +# Create a VPC for the region associated with the AZ +resource "aws_vpc" "example" { + cidr_block = "${cidrsubnet("10.0.0.0/8", 4, var.region_number[data.aws_availability_zone.example.region])}" +} + +# Create a subnet for the AZ within the regional VPC +resource "aws_subnet" "example" { + vpc_id = "${aws_vpc.example.id}" + cidr_block = "${cidrsubnet(aws_vpc.example.cidr_block, 4, var.az_number[data.aws_availability_zone.name_suffix])}" +} +``` + +## Argument Reference + +The arguments of this data source act as filters for querying the available +availability zones. The given filters must match exactly one availability +zone whose data will be exported as attributes. + +* `name` - (Optional) The full name of the availability zone to select. + +* `state` - (Optional) A specific availability zone state to require. May + be any of `"available"`, `"information"`, `"impaired"` or `"available"`. + +All reasonable uses of this data source will specify `name`, since `state` +alone would match a single AZ only in a region that itself has only one AZ. + +## Attributes Reference + +The following attributes are exported: + +* `name` - The name of the selected availability zone. + +* `region` - The region where the selected availability zone resides. + This is always the region selected on the provider, since this data source + searches only within that region. + +* `name_suffix` - The part of the AZ name that appears after the region name, + uniquely identifying the AZ within its region. + +* `state` - The current state of the AZ. diff --git a/website/source/docs/providers/aws/d/availability_zones.html.markdown b/website/source/docs/providers/aws/d/availability_zones.html.markdown index 0eb87d781afc..9fd885f10f74 100644 --- a/website/source/docs/providers/aws/d/availability_zones.html.markdown +++ b/website/source/docs/providers/aws/d/availability_zones.html.markdown @@ -12,6 +12,9 @@ The Availability Zones data source allows access to the list of AWS Availability Zones which can be accessed by an AWS account within the region configured in the provider. +This is different from the `aws_availability_zone` (singular) data source, +which provides some details about a specific availability zone. + ## Example Usage ``` diff --git a/website/source/docs/providers/aws/d/region.html.markdown b/website/source/docs/providers/aws/d/region.html.markdown new file mode 100644 index 000000000000..4105639c0e75 --- /dev/null +++ b/website/source/docs/providers/aws/d/region.html.markdown @@ -0,0 +1,54 @@ +--- +layout: "aws" +page_title: "AWS: aws_region" +sidebar_current: "docs-aws-datasource-region" +description: |- + Provides details about a specific service region +--- + +# aws\_region + +`aws_region` provides details about a specific AWS region. + +As well as validating a given region name (and optionally obtaining its +endpoint) this resource can be used to discover the name of the region +configured within the provider. The latter can be useful in a child module +which is inheriting an AWS provider configuration from its parent module. + +## Example Usage + +The following example shows how the resource might be used to obtain +the name of the AWS region configured on the provider. + +``` +data "aws_region" "current" { + current = true +} +``` + +## Argument Reference + +The arguments of this data source act as filters for querying the available +regions. 
The given filters must match exactly one region whose data will be +exported as attributes. + +* `name` - (Optional) The full name of the region to select. + +* `current` - (Optional) Set to `true` to match only the region configured + in the provider. (It is not meaningful to set this to `false`.) + +* `endpoint` - (Optional) The endpoint of the region to select. + +At least one of the above attributes should be provided to ensure that only +one region is matched. + +## Attributes Reference + +The following attributes are exported: + +* `name` - The name of the selected region. + +* `current` - `true` if the selected region is the one configured on the + provider, or `false` otherwise. + +* `endpoint` - The endpoint for the selected region. diff --git a/website/source/docs/providers/aws/d/subnet.html.markdown b/website/source/docs/providers/aws/d/subnet.html.markdown new file mode 100644 index 000000000000..bcb4a7a653f3 --- /dev/null +++ b/website/source/docs/providers/aws/d/subnet.html.markdown @@ -0,0 +1,81 @@ +--- +layout: "aws" +page_title: "AWS: aws_subnet" +sidebar_current: "docs-aws-datasource-subnet" +description: |- + Provides details about a specific VPC subnet +--- + +# aws\_subnet + +`aws_subnet` provides details about a specific VPC subnet. + +This data source can prove useful when a module accepts a subnet id as +an input variable and needs to, for example, determine the id of the +VPC that the subnet belongs to. + +## Example Usage + +The following example shows how one might accept a subnet id as a variable +and use this data source to obtain the data necessary to create a security +group that allows connections from hosts in that subnet. + +``` +variable "subnet_id" {} + +data "aws_subnet" "selected" { + id = "${var.subnet_id}" +} + +resource "aws_security_group" "subnet" { + vpc_id = "${data.aws_subnet.selected.vpc_id}" + + ingress { + cidr_blocks = ["${data.aws_subnet.selected.cidr_block}"] + from_port = 80 + to_port = 80 + protocol = "tcp" + } +} +``` + +## Argument Reference + +The arguments of this data source act as filters for querying the available +subnets in the current region. The given filters must match exactly one +subnet whose data will be exported as attributes. + +* `availability_zone` - (Optional) The availability zone where the + subnet must reside. + +* `cidr_block` - (Optional) The cidr block of the desired subnet. + +* `default_for_az` - (Optional) Boolean constraint for whether the desired + subnet must be the default subnet for its associated availability zone. + +* `filter` - (Optional) Custom filter block as described below. + +* `id` - (Optional) The id of the specific subnet to retrieve. + +* `state` - (Optional) The state that the desired subnet must have. + +* `tags` - (Optional) A mapping of tags, each pair of which must exactly match + a pair on the desired subnet. + +* `vpc_id` - (Optional) The id of the VPC that the desired subnet belongs to. + +More complex filters can be expressed using one or more `filter` sub-blocks, +which take the following arguments: + +* `name` - (Required) The name of the field to filter by, as defined by + [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html). + +* `values` - (Required) Set of values that are accepted for the given field. + A subnet will be selected if any one of the given values matches. + +## Attributes Reference + +All of the argument attributes except `filter` blocks are also exported as +result attributes.
This data source will complete the data by populating +any fields that are not included in the configuration with the data for +the selected subnet. diff --git a/website/source/docs/providers/aws/d/vpc.html.markdown b/website/source/docs/providers/aws/d/vpc.html.markdown new file mode 100644 index 000000000000..2f4bf6f97f7e --- /dev/null +++ b/website/source/docs/providers/aws/d/vpc.html.markdown @@ -0,0 +1,79 @@ +--- +layout: "aws" +page_title: "AWS: aws_vpc" +sidebar_current: "docs-aws-datasource-vpc" +description: |- + Provides details about a specific VPC +--- + +# aws\_vpc + +`aws_vpc` provides details about a specific VPC. + +This data source can prove useful when a module accepts a VPC id as +an input variable and needs to, for example, determine the CIDR block of that +VPC. + +## Example Usage + +The following example shows how one might accept a VPC id as a variable +and use this data source to obtain the data necessary to create a subnet +within it. + +``` +variable "vpc_id" {} + +data "aws_vpc" "selected" { + id = "${var.vpc_id}" +} + +resource "aws_subnet" "example" { + vpc_id = "${data.aws_vpc.selected.id}" + availability_zone = "us-west-2a" + cidr_block = "${cidrsubnet(data.aws_vpc.selected.cidr_block, 4, 1)}" +} +``` + +## Argument Reference + +The arguments of this data source act as filters for querying the available +VPCs in the current region. The given filters must match exactly one +VPC whose data will be exported as attributes. + +* `cidr_block` - (Optional) The cidr block of the desired VPC. + +* `dhcp_options_id` - (Optional) The DHCP options id of the desired VPC. + +* `default` - (Optional) Boolean constraint on whether the desired VPC is + the default VPC for the region. + +* `filter` - (Optional) Custom filter block as described below. + +* `id` - (Optional) The id of the specific VPC to retrieve. + +* `state` - (Optional) The current state of the desired VPC. + Can be either `"pending"` or `"available"`. + +* `tags` - (Optional) A mapping of tags, each pair of which must exactly match + a pair on the desired VPC. + +More complex filters can be expressed using one or more `filter` sub-blocks, +which take the following arguments: + +* `name` - (Required) The name of the field to filter by, as defined by + [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html). + +* `values` - (Required) Set of values that are accepted for the given field. + A VPC will be selected if any one of the given values matches. + +## Attributes Reference + +All of the argument attributes except `filter` blocks are also exported as +result attributes. This data source will complete the data by populating +any fields that are not included in the configuration with the data for +the selected VPC. + +The following attribute is additionally exported: + +* `instance_tenancy` - The allowed tenancy of instances launched into the + selected VPC. May be any of `"default"`, `"dedicated"`, or `"host"`. diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 07e3bcad04af..d603d6f252b6 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -17,6 +17,9 @@ > aws_ami + > + aws_availability_zone + > aws_availability_zones @@ -38,12 +41,21 @@ > aws_ip_ranges - > - aws_redshift_service_account - + > + aws_redshift_service_account + + > + aws_region + > aws_s3_bucket_object + > + aws_subnet + + > + aws_vpc +
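The three helpers in `ec2_filters.go` are designed to compose into a single `Filters` list that can be passed to any of the EC2 `Describe...` calls. The sketch below shows that composition in a hypothetical read function assumed to live in the same `aws` provider package (so `AWSClient`, `tagsFromMap`, and the unexported helpers are in scope); `dataSourceAwsExampleRead` and its `vpc_id`/`state`/`tags`/`filter` attributes are illustrative only, and the real call sites are the `aws_subnet` and `aws_vpc` data sources above.

```
package aws

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/terraform/helper/schema"
)

// dataSourceAwsExampleRead is a hypothetical read function showing how the
// filter helpers compose; the real implementations are the data sources
// added in this change.
func dataSourceAwsExampleRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	req := &ec2.DescribeSubnetsInput{}

	// Exact-match filters built from scalar attributes; empty values are skipped.
	req.Filters = buildEC2AttributeFilterList(map[string]string{
		"vpc-id": d.Get("vpc_id").(string),
		"state":  d.Get("state").(string),
	})

	// One "tag:<key>" filter per entry in the tags map.
	req.Filters = append(req.Filters, buildEC2TagFilterList(
		tagsFromMap(d.Get("tags").(map[string]interface{})),
	)...)

	// Free-form "filter" blocks from the configuration.
	req.Filters = append(req.Filters, buildEC2CustomFilterList(
		d.Get("filter").(*schema.Set),
	)...)

	if len(req.Filters) == 0 {
		// Don't send an empty filters list; the EC2 API won't accept it.
		req.Filters = nil
	}

	log.Printf("[DEBUG] DescribeSubnets %s\n", req)
	resp, err := conn.DescribeSubnets(req)
	if err != nil {
		return err
	}
	if resp == nil || len(resp.Subnets) == 0 {
		return fmt.Errorf("no matching subnet found")
	}
	if len(resp.Subnets) > 1 {
		return fmt.Errorf("multiple subnets matched; use additional constraints")
	}

	d.SetId(*resp.Subnets[0].SubnetId)
	return nil
}
```

Normalizing an empty list to `nil` at the call site mirrors the subnet and VPC data sources, since the EC2 API rejects an explicitly empty `Filters` list.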