diff --git a/.changelog/3699.txt b/.changelog/3699.txt
new file mode 100644
index 00000000000..d21c7bd7405
--- /dev/null
+++ b/.changelog/3699.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+container: Promoted `google_container_cluster` `database_encryption` to GA.
+```
diff --git a/google/resource_bigquery_table.go b/google/resource_bigquery_table.go
index 01e92e4e85c..7e723671616 100644
--- a/google/resource_bigquery_table.go
+++ b/google/resource_bigquery_table.go
@@ -336,13 +336,13 @@ func resourceBigQueryTable() *schema.Resource {
 							Description: `Number of milliseconds for which to keep the storage for a partition.`,
 						},
 
-						// Type: [Required] The supported types are DAY and HOUR, which will generate
-						// one partition per day or hour based on data loading time.
+						// Type: [Required] The only type supported is DAY, which will generate
+						// one partition per day based on data loading time.
 						"type": {
 							Type:         schema.TypeString,
 							Required:     true,
-							Description:  `The supported types are DAY and HOUR, which will generate one partition per day or hour based on data loading time.`,
-							ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR"}, false),
+							Description:  `The only type supported is DAY, which will generate one partition per day based on data loading time.`,
+							ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
 						},
 
 						// Field: [Optional] The field used to determine how to create a time-based
diff --git a/google/resource_bigquery_table_test.go b/google/resource_bigquery_table_test.go
index 5a6228d3f3b..eb665179e22 100644
--- a/google/resource_bigquery_table_test.go
+++ b/google/resource_bigquery_table_test.go
@@ -20,7 +20,7 @@ func TestAccBigQueryTable_Basic(t *testing.T) {
 		CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
 		Steps: []resource.TestStep{
 			{
-				Config: testAccBigQueryTableDailyTimePartitioning(datasetID, tableID),
+				Config: testAccBigQueryTable(datasetID, tableID),
 			},
 			{
 				ResourceName:      "google_bigquery_table.test",
@@ -64,37 +64,6 @@ func TestAccBigQueryTable_Kms(t *testing.T) {
 	})
 }
 
-func TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) {
-	t.Parallel()
-
-	datasetID := fmt.Sprintf("tf_test_%s", randString(t, 10))
-	tableID := fmt.Sprintf("tf_test_%s", randString(t, 10))
-
-	vcrTest(t, resource.TestCase{
-		PreCheck:     func() { testAccPreCheck(t) },
-		Providers:    testAccProviders,
-		CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
-		Steps: []resource.TestStep{
-			{
-				Config: testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID),
-			},
-			{
-				ResourceName:      "google_bigquery_table.test",
-				ImportState:       true,
-				ImportStateVerify: true,
-			},
-			{
-				Config: testAccBigQueryTableUpdated(datasetID, tableID),
-			},
-			{
-				ResourceName:      "google_bigquery_table.test",
-				ImportState:       true,
-				ImportStateVerify: true,
-			},
-		},
-	})
-}
-
 func TestAccBigQueryTable_HivePartitioning(t *testing.T) {
 	t.Parallel()
 	bucketName := testBucketName(t)
@@ -292,7 +261,7 @@ func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.St
 	}
 }
 
-func testAccBigQueryTableDailyTimePartitioning(datasetID, tableID string) string {
+func testAccBigQueryTable(datasetID, tableID string) string {
 	return fmt.Sprintf(`
 resource "google_bigquery_dataset" "test" {
   dataset_id = "%s"
@@ -349,63 +318,6 @@ EOH
 `, datasetID, tableID)
 }
 
-func testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID string) string {
-	return fmt.Sprintf(`
-resource "google_bigquery_dataset" "test" {
-  dataset_id = "%s"
-}
-
-resource "google_bigquery_table" "test" {
-  table_id   = "%s"
-  dataset_id = google_bigquery_dataset.test.dataset_id
-
-  time_partitioning {
-    type                     = "HOUR"
-    field                    = "ts"
-    require_partition_filter = true
-  }
-  clustering = ["some_int", "some_string"]
-  schema     = <<EOH
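For context, two illustrative configs follow. Neither is code from this PR, and every name in them (`example`, the `ts` column, the KMS key path) is a placeholder.

First, the tightened `ValidateFunc` means `time_partitioning.type` now only accepts `"DAY"`: a config like the sketch below still plans cleanly, while `type = "HOUR"` fails validation at plan time instead of at apply. This loosely mirrors the surviving `testAccBigQueryTable` fixture:

```hcl
resource "google_bigquery_dataset" "example" {
  # placeholder dataset name, not taken from this diff
  dataset_id = "example_dataset"
}

resource "google_bigquery_table" "example" {
  dataset_id = google_bigquery_dataset.example.dataset_id
  table_id   = "example_table" # placeholder table name

  time_partitioning {
    # "DAY" is the only value the tightened validator accepts
    type                     = "DAY"
    # optional: partition on a TIMESTAMP/DATE column instead of ingestion time
    field                    = "ts"
    require_partition_filter = true
  }

  # minimal illustrative schema containing the partitioning column
  schema = <<EOF
[
  {
    "name": "ts",
    "type": "TIMESTAMP"
  }
]
EOF
}
```

Second, the attached changelog entry promotes `database_encryption` on `google_container_cluster` to GA. A minimal sketch of that block, assuming an existing Cloud KMS key in the cluster's region:

```hcl
resource "google_container_cluster" "example" {
  name               = "example-cluster" # placeholder
  location           = "us-central1"     # placeholder
  initial_node_count = 1

  database_encryption {
    state = "ENCRYPTED"
    # placeholder Cloud KMS key; must already exist and be grantable to GKE
    key_name = "projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key"
  }
}
```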