diff --git a/.changelog/29566.txt b/.changelog/29566.txt new file mode 100644 index 00000000000..57afe4ba759 --- /dev/null +++ b/.changelog/29566.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_glue_crawler: Add `create_native_delta_table` attribute to the `delta_target` configuration block +``` \ No newline at end of file diff --git a/internal/service/glue/crawler.go b/internal/service/glue/crawler.go index 35fe8bdd2ce..28723f08c18 100644 --- a/internal/service/glue/crawler.go +++ b/internal/service/glue/crawler.go @@ -35,6 +35,7 @@ func ResourceCrawler() *schema.Resource { ReadWithoutTimeout: resourceCrawlerRead, UpdateWithoutTimeout: resourceCrawlerUpdate, DeleteWithoutTimeout: resourceCrawlerDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -79,6 +80,11 @@ func ResourceCrawler() *schema.Resource { }, }, }, + "classifiers": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, "configuration": { Type: schema.TypeString, Optional: true, @@ -89,11 +95,6 @@ func ResourceCrawler() *schema.Resource { }, ValidateFunc: validation.StringIsJSON, }, - "classifiers": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, "database_name": { Type: schema.TypeString, ForceNew: true, @@ -110,6 +111,10 @@ func ResourceCrawler() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "create_native_delta_table": { + Type: schema.TypeBool, + Optional: true, + }, "delta_tables": { Type: schema.TypeSet, Required: true, @@ -280,36 +285,6 @@ func ResourceCrawler() *schema.Resource { return old == strings.TrimPrefix(newARN.Resource, "role/") }, }, - "schedule": { - Type: schema.TypeString, - Optional: true, - }, - "schema_change_policy": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "delete_behavior": { - Type: schema.TypeString, - Optional: true, - Default: glue.DeleteBehaviorDeprecateInDatabase, - ValidateFunc: validation.StringInSlice(glue.DeleteBehavior_Values(), false), - }, - "update_behavior": { - Type: schema.TypeString, - Optional: true, - Default: glue.UpdateBehaviorUpdateInDatabase, - ValidateFunc: validation.StringInSlice(glue.UpdateBehavior_Values(), false), - }, - }, - }, - }, - "security_configuration": { - Type: schema.TypeString, - Optional: true, - }, "s3_target": { Type: schema.TypeList, Optional: true, @@ -348,6 +323,36 @@ func ResourceCrawler() *schema.Resource { }, }, }, + "schedule": { + Type: schema.TypeString, + Optional: true, + }, + "schema_change_policy": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "delete_behavior": { + Type: schema.TypeString, + Optional: true, + Default: glue.DeleteBehaviorDeprecateInDatabase, + ValidateFunc: validation.StringInSlice(glue.DeleteBehavior_Values(), false), + }, + "update_behavior": { + Type: schema.TypeString, + Optional: true, + Default: glue.UpdateBehaviorUpdateInDatabase, + ValidateFunc: validation.StringInSlice(glue.UpdateBehavior_Values(), false), + }, + }, + }, + }, + "security_configuration": { + Type: schema.TypeString, + Optional: true, + }, "table_prefix": { Type: schema.TypeString, Optional: true, @@ -407,6 +412,188 @@ func resourceCrawlerCreate(ctx context.Context, d *schema.ResourceData, meta int return append(diags, resourceCrawlerRead(ctx, d, meta)...) 
} +func resourceCrawlerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).GlueConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + + crawler, err := FindCrawlerByName(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] Glue Crawler (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Glue Crawler (%s): %s", d.Id(), err) + } + + crawlerARN := arn.ARN{ + Partition: meta.(*conns.AWSClient).Partition, + Service: "glue", + Region: meta.(*conns.AWSClient).Region, + AccountID: meta.(*conns.AWSClient).AccountID, + Resource: fmt.Sprintf("crawler/%s", d.Id()), + }.String() + d.Set("arn", crawlerARN) + d.Set("name", crawler.Name) + d.Set("database_name", crawler.DatabaseName) + d.Set("role", crawler.Role) + d.Set("configuration", crawler.Configuration) + d.Set("description", crawler.Description) + d.Set("security_configuration", crawler.CrawlerSecurityConfiguration) + d.Set("schedule", "") + if crawler.Schedule != nil { + d.Set("schedule", crawler.Schedule.ScheduleExpression) + } + if err := d.Set("classifiers", flex.FlattenStringList(crawler.Classifiers)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting classifiers: %s", err) + } + d.Set("table_prefix", crawler.TablePrefix) + + if crawler.SchemaChangePolicy != nil { + if err := d.Set("schema_change_policy", flattenCrawlerSchemaChangePolicy(crawler.SchemaChangePolicy)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting schema_change_policy: %s", err) + } + } + + if crawler.Targets != nil { + if err := d.Set("dynamodb_target", flattenDynamoDBTargets(crawler.Targets.DynamoDBTargets)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting dynamodb_target: %s", err) + } + + if err := 
d.Set("jdbc_target", flattenJDBCTargets(crawler.Targets.JdbcTargets)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting jdbc_target: %s", err) + } + + if err := d.Set("s3_target", flattenS3Targets(crawler.Targets.S3Targets)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting s3_target: %s", err) + } + + if err := d.Set("catalog_target", flattenCatalogTargets(crawler.Targets.CatalogTargets)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting catalog_target: %s", err) + } + + if err := d.Set("mongodb_target", flattenMongoDBTargets(crawler.Targets.MongoDBTargets)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting mongodb_target: %s", err) + } + + if err := d.Set("delta_target", flattenDeltaTargets(crawler.Targets.DeltaTargets)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting delta_target: %s", err) + } + } + + tags, err := ListTags(ctx, conn, crawlerARN) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing tags for Glue Crawler (%s): %s", crawlerARN, err) + } + + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) + } + + if err := d.Set("lineage_configuration", flattenCrawlerLineageConfiguration(crawler.LineageConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting lineage_configuration: %s", err) + } + + if err := d.Set("lake_formation_configuration", flattenLakeFormationConfiguration(crawler.LakeFormationConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting lake_formation_configuration: %s", err) + } + + if err := d.Set("recrawl_policy", flattenCrawlerRecrawlPolicy(crawler.RecrawlPolicy)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting 
recrawl_policy: %s", err) + } + + return diags +} + +func resourceCrawlerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + glueConn := meta.(*conns.AWSClient).GlueConn() + name := d.Get("name").(string) + + if d.HasChangesExcept("tags", "tags_all") { + updateCrawlerInput, err := updateCrawlerInput(d, name) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Glue Crawler (%s): %s", d.Id(), err) + } + + // Retry for IAM eventual consistency + err = resource.RetryContext(ctx, propagationTimeout, func() *resource.RetryError { + _, err := glueConn.UpdateCrawlerWithContext(ctx, updateCrawlerInput) + if err != nil { + // InvalidInputException: Insufficient Lake Formation permission(s) on xxx + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Insufficient Lake Formation permission") { + return resource.RetryableError(err) + } + + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Service is unable to assume role") { + return resource.RetryableError(err) + } + + // InvalidInputException: Unable to retrieve connection tf-acc-test-8656357591012534997: User: arn:aws:sts::*******:assumed-role/tf-acc-test-8656357591012534997/AWS-Crawler is not authorized to perform: glue:GetConnection on resource: * (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4d72b66f-9c75-11e8-9faf-5b526c7be968) + if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "is not authorized") { + return resource.RetryableError(err) + } + + // InvalidInputException: SQS queue arn:aws:sqs:us-west-2:*******:tf-acc-test-4317277351691904203 does not exist or the role provided does not have access to it. 
+ if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "SQS queue") && tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "does not exist or the role provided does not have access to it") { + return resource.RetryableError(err) + } + + return resource.NonRetryableError(err) + } + return nil + }) + + if tfresource.TimedOut(err) { + _, err = glueConn.UpdateCrawlerWithContext(ctx, updateCrawlerInput) + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Glue Crawler (%s): %s", d.Id(), err) + } + } + + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + if err := UpdateTags(ctx, glueConn, d.Get("arn").(string), o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating tags: %s", err) + } + } + + return append(diags, resourceCrawlerRead(ctx, d, meta)...) +} + +func resourceCrawlerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + glueConn := meta.(*conns.AWSClient).GlueConn() + + log.Printf("[DEBUG] Deleting Glue Crawler: %s", d.Id()) + _, err := glueConn.DeleteCrawlerWithContext(ctx, &glue.DeleteCrawlerInput{ + Name: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, glue.ErrCodeEntityNotFoundException) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Glue Crawler (%s): %s", d.Id(), err) + } + + return diags +} + func createCrawlerInput(ctx context.Context, d *schema.ResourceData, crawlerName string, defaultTagsConfig *tftags.DefaultConfig) (*glue.CreateCrawlerInput, error) { tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) @@ -737,8 +924,9 @@ func expandDeltaTargets(targets []interface{}) []*glue.DeltaTarget { func expandDeltaTarget(cfg map[string]interface{}) *glue.DeltaTarget { target := &glue.DeltaTarget{ - DeltaTables: flex.ExpandStringSet(cfg["delta_tables"].(*schema.Set)), - WriteManifest: aws.Bool(cfg["write_manifest"].(bool)), + 
CreateNativeDeltaTable: aws.Bool(cfg["create_native_delta_table"].(bool)), + DeltaTables: flex.ExpandStringSet(cfg["delta_tables"].(*schema.Set)), + WriteManifest: aws.Bool(cfg["write_manifest"].(bool)), } if v, ok := cfg["connection_name"].(string); ok { @@ -748,168 +936,6 @@ func expandDeltaTarget(cfg map[string]interface{}) *glue.DeltaTarget { return target } -func resourceCrawlerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - glueConn := meta.(*conns.AWSClient).GlueConn() - name := d.Get("name").(string) - - if d.HasChangesExcept("tags", "tags_all") { - updateCrawlerInput, err := updateCrawlerInput(d, name) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Glue Crawler (%s): %s", d.Id(), err) - } - - // Retry for IAM eventual consistency - err = resource.RetryContext(ctx, propagationTimeout, func() *resource.RetryError { - _, err := glueConn.UpdateCrawlerWithContext(ctx, updateCrawlerInput) - if err != nil { - // InvalidInputException: Insufficient Lake Formation permission(s) on xxx - if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Insufficient Lake Formation permission") { - return resource.RetryableError(err) - } - - if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "Service is unable to assume role") { - return resource.RetryableError(err) - } - - // InvalidInputException: Unable to retrieve connection tf-acc-test-8656357591012534997: User: arn:aws:sts::*******:assumed-role/tf-acc-test-8656357591012534997/AWS-Crawler is not authorized to perform: glue:GetConnection on resource: * (Service: AmazonDataCatalog; Status Code: 400; Error Code: AccessDeniedException; Request ID: 4d72b66f-9c75-11e8-9faf-5b526c7be968) - if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "is not authorized") { - return resource.RetryableError(err) - } - - // InvalidInputException: SQS queue 
arn:aws:sqs:us-west-2:*******:tf-acc-test-4317277351691904203 does not exist or the role provided does not have access to it. - if tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "SQS queue") && tfawserr.ErrMessageContains(err, glue.ErrCodeInvalidInputException, "does not exist or the role provided does not have access to it") { - return resource.RetryableError(err) - } - - return resource.NonRetryableError(err) - } - return nil - }) - - if tfresource.TimedOut(err) { - _, err = glueConn.UpdateCrawlerWithContext(ctx, updateCrawlerInput) - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Glue Crawler (%s): %s", d.Id(), err) - } - } - - if d.HasChange("tags_all") { - o, n := d.GetChange("tags_all") - if err := UpdateTags(ctx, glueConn, d.Get("arn").(string), o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating tags: %s", err) - } - } - - return append(diags, resourceCrawlerRead(ctx, d, meta)...) -} - -func resourceCrawlerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GlueConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - - crawler, err := FindCrawlerByName(ctx, conn, d.Id()) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] Glue Crawler (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Glue Crawler (%s): %s", d.Id(), err) - } - - crawlerARN := arn.ARN{ - Partition: meta.(*conns.AWSClient).Partition, - Service: "glue", - Region: meta.(*conns.AWSClient).Region, - AccountID: meta.(*conns.AWSClient).AccountID, - Resource: fmt.Sprintf("crawler/%s", d.Id()), - }.String() - d.Set("arn", crawlerARN) - d.Set("name", crawler.Name) - d.Set("database_name", crawler.DatabaseName) - d.Set("role", crawler.Role) - 
d.Set("configuration", crawler.Configuration) - d.Set("description", crawler.Description) - d.Set("security_configuration", crawler.CrawlerSecurityConfiguration) - d.Set("schedule", "") - if crawler.Schedule != nil { - d.Set("schedule", crawler.Schedule.ScheduleExpression) - } - if err := d.Set("classifiers", flex.FlattenStringList(crawler.Classifiers)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting classifiers: %s", err) - } - d.Set("table_prefix", crawler.TablePrefix) - - if crawler.SchemaChangePolicy != nil { - if err := d.Set("schema_change_policy", flattenCrawlerSchemaChangePolicy(crawler.SchemaChangePolicy)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting schema_change_policy: %s", err) - } - } - - if crawler.Targets != nil { - if err := d.Set("dynamodb_target", flattenDynamoDBTargets(crawler.Targets.DynamoDBTargets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting dynamodb_target: %s", err) - } - - if err := d.Set("jdbc_target", flattenJDBCTargets(crawler.Targets.JdbcTargets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting jdbc_target: %s", err) - } - - if err := d.Set("s3_target", flattenS3Targets(crawler.Targets.S3Targets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting s3_target: %s", err) - } - - if err := d.Set("catalog_target", flattenCatalogTargets(crawler.Targets.CatalogTargets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting catalog_target: %s", err) - } - - if err := d.Set("mongodb_target", flattenMongoDBTargets(crawler.Targets.MongoDBTargets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting mongodb_target: %s", err) - } - - if err := d.Set("delta_target", flattenDeltaTargets(crawler.Targets.DeltaTargets)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting delta_target: %s", err) - } - } - - tags, err := ListTags(ctx, conn, crawlerARN) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing tags for Glue Crawler (%s): %s", crawlerARN, err) - } 
- - tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - - //lintignore:AWSR002 - if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } - - if err := d.Set("tags_all", tags.Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) - } - - if err := d.Set("lineage_configuration", flattenCrawlerLineageConfiguration(crawler.LineageConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting lineage_configuration: %s", err) - } - - if err := d.Set("lake_formation_configuration", flattenLakeFormationConfiguration(crawler.LakeFormationConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting lake_formation_configuration: %s", err) - } - - if err := d.Set("recrawl_policy", flattenCrawlerRecrawlPolicy(crawler.RecrawlPolicy)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting recrawl_policy: %s", err) - } - - return diags -} - func flattenS3Targets(s3Targets []*glue.S3Target) []map[string]interface{} { result := make([]map[string]interface{}, 0) @@ -996,6 +1022,7 @@ func flattenDeltaTargets(deltaTargets []*glue.DeltaTarget) []map[string]interfac for _, deltaTarget := range deltaTargets { attrs := make(map[string]interface{}) attrs["connection_name"] = aws.StringValue(deltaTarget.ConnectionName) + attrs["create_native_delta_table"] = aws.BoolValue(deltaTarget.CreateNativeDeltaTable) attrs["delta_tables"] = flex.FlattenStringSet(deltaTarget.DeltaTables) attrs["write_manifest"] = aws.BoolValue(deltaTarget.WriteManifest) @@ -1004,23 +1031,6 @@ func flattenDeltaTargets(deltaTargets []*glue.DeltaTarget) []map[string]interfac return result } -func resourceCrawlerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - glueConn := meta.(*conns.AWSClient).GlueConn() - - log.Printf("[DEBUG] deleting Glue Crawler: %s", d.Id()) - _, err := 
glueConn.DeleteCrawlerWithContext(ctx, &glue.DeleteCrawlerInput{ - Name: aws.String(d.Id()), - }) - if err != nil { - if tfawserr.ErrCodeEquals(err, glue.ErrCodeEntityNotFoundException) { - return diags - } - return sdkdiag.AppendErrorf(diags, "deleting Glue Crawler: %s", err) - } - return diags -} - func flattenCrawlerSchemaChangePolicy(cfg *glue.SchemaChangePolicy) []map[string]interface{} { if cfg == nil { return []map[string]interface{}{} diff --git a/internal/service/glue/crawler_test.go b/internal/service/glue/crawler_test.go index 11099de6bdc..f374cd246bd 100644 --- a/internal/service/glue/crawler_test.go +++ b/internal/service/glue/crawler_test.go @@ -528,11 +528,12 @@ func TestAccGlueCrawler_deltaTarget(t *testing.T) { CheckDestroy: testAccCheckCrawlerDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccCrawlerConfig_deltaTarget(rName, connectionUrl, "s3://table1"), + Config: testAccCrawlerConfig_deltaTarget(rName, connectionUrl, "s3://table1", "null"), Check: resource.ComposeTestCheckFunc( testAccCheckCrawlerExists(ctx, resourceName, &crawler), resource.TestCheckResourceAttr(resourceName, "delta_target.#", "1"), resource.TestCheckResourceAttr(resourceName, "delta_target.0.connection_name", rName), + resource.TestCheckResourceAttr(resourceName, "delta_target.0.create_native_delta_table", "false"), resource.TestCheckResourceAttr(resourceName, "delta_target.0.delta_tables.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "delta_target.0.delta_tables.*", "s3://table1"), resource.TestCheckResourceAttr(resourceName, "delta_target.0.write_manifest", "false"), @@ -544,11 +545,12 @@ func TestAccGlueCrawler_deltaTarget(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccCrawlerConfig_deltaTarget(rName, connectionUrl, "s3://table2"), + Config: testAccCrawlerConfig_deltaTarget(rName, connectionUrl, "s3://table2", "true"), Check: resource.ComposeTestCheckFunc( testAccCheckCrawlerExists(ctx, resourceName, &crawler), 
resource.TestCheckResourceAttr(resourceName, "delta_target.#", "1"), resource.TestCheckResourceAttr(resourceName, "delta_target.0.connection_name", rName), + resource.TestCheckResourceAttr(resourceName, "delta_target.0.create_native_delta_table", "true"), resource.TestCheckResourceAttr(resourceName, "delta_target.0.delta_tables.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "delta_target.0.delta_tables.*", "s3://table2"), resource.TestCheckResourceAttr(resourceName, "delta_target.0.write_manifest", "false"), @@ -2931,7 +2933,7 @@ resource "aws_glue_crawler" "test" { `, rName, connectionUrl, path1, path2)) } -func testAccCrawlerConfig_deltaTarget(rName, connectionUrl, tableName string) string { +func testAccCrawlerConfig_deltaTarget(rName, connectionUrl, tableName, createNativeDeltaTable string) string { return acctest.ConfigCompose(testAccCrawlerConfig_base(rName), fmt.Sprintf(` resource "aws_glue_catalog_database" "test" { name = %[1]q @@ -2956,12 +2958,13 @@ resource "aws_glue_crawler" "test" { role = aws_iam_role.test.name delta_target { - connection_name = aws_glue_connection.test.name - delta_tables = [%[3]q] - write_manifest = false + connection_name = aws_glue_connection.test.name + delta_tables = [%[3]q] + write_manifest = false + create_native_delta_table = %[4]s } } -`, rName, connectionUrl, tableName)) +`, rName, connectionUrl, tableName, createNativeDeltaTable)) } func testAccCrawlerConfig_lakeformation(rName string, use bool) string { diff --git a/website/docs/r/glue_crawler.html.markdown b/website/docs/r/glue_crawler.html.markdown index 37f076841d8..03f90afa3d8 100644 --- a/website/docs/r/glue_crawler.html.markdown +++ b/website/docs/r/glue_crawler.html.markdown @@ -194,6 +194,7 @@ The following arguments are supported: ### Delta Target * `connection_name` - (Optional) The name of the connection to use to connect to the Delta table target. 
+* `create_native_delta_table` - (Optional) Specifies whether the crawler will create native tables, to allow integration with query engines that support querying of the Delta transaction log directly. +* `delta_tables` - (Required) A list of the Amazon S3 paths to the Delta tables. +* `write_manifest` - (Required) Specifies whether to write the manifest files to the Delta table path.