From 6b36d45d9a2b0dfffd7574ebcbd3a2925997704d Mon Sep 17 00:00:00 2001
From: The Magician
Date: Thu, 1 Oct 2020 11:53:31 -0700
Subject: [PATCH] Upstream bq mat view (#3897) (#2532)

* Add support for bigquery materialized views

* Add support for bigquery materialized views

* Add ImportStateVerifyIgnore to import test

* Fix linter error

Co-authored-by: Manasi Belekar

Signed-off-by: Modular Magician
Co-authored-by: Manasi Belekar
---
 .changelog/3897.txt                         |   3 +
 google-beta/resource_bigquery_table.go      |  75 +++++++
 google-beta/resource_bigquery_table_test.go | 235 ++++++++++++++++++++
 website/docs/r/bigquery_table.html.markdown |  13 ++
 4 files changed, 326 insertions(+)
 create mode 100644 .changelog/3897.txt

diff --git a/.changelog/3897.txt b/.changelog/3897.txt
new file mode 100644
index 0000000000..e7e75f82ee
--- /dev/null
+++ b/.changelog/3897.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+bigquery: added support for `google_bigquery_table` `materialized_view` field
+```
diff --git a/google-beta/resource_bigquery_table.go b/google-beta/resource_bigquery_table.go
index 7a26db764f..e1d4a2897a 100644
--- a/google-beta/resource_bigquery_table.go
+++ b/google-beta/resource_bigquery_table.go
@@ -396,6 +396,44 @@ func resourceBigQueryTable() *schema.Resource {
 				},
 			},
 
+			// MaterializedView: [Optional] If specified, configures this table as a materialized view.
+			"materialized_view": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `If specified, configures this table as a materialized view.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						// EnableRefresh: [Optional] Enable automatic refresh of
+						// the materialized view when the base table is updated. The default
+						// value is "true".
+						"enable_refresh": {
+							Type:        schema.TypeBool,
+							Optional:    true,
+							Default:     true,
+							Description: `Specifies whether BigQuery should automatically refresh the materialized view when the base table is updated. The default is true.`,
+						},
+
+						// RefreshIntervalMs: [Optional] The maximum frequency
+						// at which this materialized view will be refreshed. The default value
+						// is 1800000 (30 minutes).
+						"refresh_interval_ms": {
+							Type:        schema.TypeInt,
+							Default:     1800000,
+							Optional:    true,
+							Description: `Specifies the maximum frequency, in milliseconds, at which this materialized view will be refreshed. The default is 1800000 (30 minutes).`,
+						},
+
+						// Query: [Required] A query whose result is persisted.
+						"query": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: `A query whose result is persisted.`,
+						},
+					},
+				},
+			},
+
 			// TimePartitioning: [Experimental] If specified, configures time-based
 			// partitioning for this table.
"time_partitioning": { @@ -620,6 +658,10 @@ func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, e table.View = expandView(v) } + if v, ok := d.GetOk("materialized_view"); ok { + table.MaterializedView = expandMaterializedView(v) + } + if v, ok := d.GetOk("description"); ok { table.Description = v.(string) } @@ -864,6 +906,14 @@ func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { } } + if res.MaterializedView != nil { + materialized_view := flattenMaterializedView(res.MaterializedView) + + if err := d.Set("materialized_view", materialized_view); err != nil { + return fmt.Errorf("Error setting materialized view: %s", err) + } + } + return nil } @@ -1272,6 +1322,31 @@ func flattenView(vd *bigquery.ViewDefinition) []map[string]interface{} { return []map[string]interface{}{result} } +func expandMaterializedView(configured interface{}) *bigquery.MaterializedViewDefinition { + raw := configured.([]interface{})[0].(map[string]interface{}) + mvd := &bigquery.MaterializedViewDefinition{Query: raw["query"].(string)} + + if v, ok := raw["enable_refresh"]; ok { + mvd.EnableRefresh = v.(bool) + mvd.ForceSendFields = append(mvd.ForceSendFields, "EnableRefresh") + } + + if v, ok := raw["refresh_interval_ms"]; ok { + mvd.RefreshIntervalMs = int64(v.(int)) + mvd.ForceSendFields = append(mvd.ForceSendFields, "RefreshIntervalMs") + } + + return mvd +} + +func flattenMaterializedView(mvd *bigquery.MaterializedViewDefinition) []map[string]interface{} { + result := map[string]interface{}{"query": mvd.Query} + result["enable_refresh"] = mvd.EnableRefresh + result["refresh_interval_ms"] = mvd.RefreshIntervalMs + + return []map[string]interface{}{result} +} + func resourceBigQueryTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { config := meta.(*Config) if err := parseImportId([]string{ diff --git a/google-beta/resource_bigquery_table_test.go b/google-beta/resource_bigquery_table_test.go index 6af4e26297..0dfefe7e06 100644 --- a/google-beta/resource_bigquery_table_test.go +++ b/google-beta/resource_bigquery_table_test.go @@ -221,6 +221,89 @@ func TestAccBigQueryTable_updateView(t *testing.T) { }) } +func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Basic(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", randString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", randString(t, 10)) + materialized_viewID := fmt.Sprintf("tf_test_%s", randString(t, 10)) + query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time"}, + }, + }, + }) +} + +func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Update(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", randString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", 
randString(t, 10)) + materialized_viewID := fmt.Sprintf("tf_test_%s", randString(t, 10)) + + query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) + + enable_refresh := "false" + refresh_interval_ms := "3600000" + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time"}, + }, + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning(datasetID, tableID, materialized_viewID, enable_refresh, refresh_interval_ms, query), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time"}, + }, + }, + }) +} + func TestAccBigQueryExternalDataTable_CSV(t *testing.T) { t.Parallel() @@ -656,6 +739,158 @@ resource "google_bigquery_table" "test" { `, datasetID, tableID, "SELECT state FROM `lookerdata.cdc.project_tycho_reports`") } +func testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, mViewID, query string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" +} + +resource "google_bigquery_table" "test" { + table_id = "%s" + dataset_id = google_bigquery_dataset.test.dataset_id + + time_partitioning { + type = "DAY" + field = "ts" + require_partition_filter = true + } + clustering = ["some_int", "some_string"] + schema = <
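For reference, a minimal sketch of a configuration that exercises the `materialized_view` block this patch adds, following the schema defined above. The dataset, table names, base-table schema, and query below are illustrative placeholders, not values taken from the patch:

resource "google_bigquery_dataset" "example" {
  dataset_id = "example_dataset"
}

# Hypothetical base table the materialized view reads from.
resource "google_bigquery_table" "base" {
  dataset_id = google_bigquery_dataset.example.dataset_id
  table_id   = "example_base"

  schema = <<EOH
[
  {"name": "some_int", "type": "INTEGER"},
  {"name": "some_string", "type": "STRING"}
]
EOH
}

resource "google_bigquery_table" "mv" {
  dataset_id = google_bigquery_dataset.example.dataset_id
  table_id   = "example_mv"

  materialized_view {
    # query is the only required field of the block.
    query = "SELECT count(some_string) AS count, some_int FROM `${google_bigquery_dataset.example.dataset_id}.${google_bigquery_table.base.table_id}` GROUP BY some_int"

    # Optional; the values shown are the schema defaults.
    enable_refresh      = true
    refresh_interval_ms = 1800000
  }
}

Because expandMaterializedView appends EnableRefresh and RefreshIntervalMs to ForceSendFields, an explicit enable_refresh = false is still sent to the API rather than being dropped as a Go zero value.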