diff --git a/.unreleased/pr_7033 b/.unreleased/pr_7033
new file mode 100644
index 00000000000..4aae7a76d9c
--- /dev/null
+++ b/.unreleased/pr_7033
@@ -0,0 +1 @@
+Implements: #7033 Use MERGE statement on CAgg Refresh
diff --git a/src/guc.c b/src/guc.c
index dffd8ae928d..24321f2a8c5 100644
--- a/src/guc.c
+++ b/src/guc.c
@@ -88,7 +88,7 @@ static char *ts_guc_default_orderby_fn = NULL;
 TSDLLEXPORT bool ts_guc_enable_job_execution_logging = false;
 bool ts_guc_enable_tss_callbacks = true;
 TSDLLEXPORT bool ts_guc_enable_delete_after_compression = false;
-TSDLLEXPORT bool ts_guc_enable_merge_on_cagg_refresh = true;
+TSDLLEXPORT bool ts_guc_enable_merge_on_cagg_refresh = false;
 
 /* default value of ts_guc_max_open_chunks_per_insert and ts_guc_max_cached_chunks_per_hypertable
  * will be set as their respective boot-value when the GUC mechanism starts up */
@@ -577,7 +577,7 @@ _guc_init(void)
 							 "Enable MERGE statement on cagg refresh",
 							 "Enable MERGE statement on cagg refresh",
 							 &ts_guc_enable_merge_on_cagg_refresh,
-							 true,
+							 false,
 							 PGC_USERSET,
 							 0,
 							 NULL,
diff --git a/tsl/src/continuous_aggs/materialize.c b/tsl/src/continuous_aggs/materialize.c
index 0c6ad0d3c10..9bee509adff 100644
--- a/tsl/src/continuous_aggs/materialize.c
+++ b/tsl/src/continuous_aggs/materialize.c
@@ -260,25 +260,22 @@ build_merge_insert_columns(List *strings, const char *separator, const char *pre
 {
 	StringInfo ret = makeStringInfo();
 
-	if (strings != NIL)
-	{
-		ListCell *lc;
-		foreach (lc, strings)
-		{
-			char *grpcol = (char *) lfirst(lc);
-			if (ret->len > 0)
-				appendStringInfoString(ret, separator);
+	Assert(strings != NIL);
 
-			if (prefix)
-				appendStringInfoString(ret, prefix);
-			appendStringInfoString(ret, quote_identifier(grpcol));
-		}
+	ListCell *lc;
+	foreach (lc, strings)
+	{
+		char *grpcol = (char *) lfirst(lc);
+		if (ret->len > 0)
+			appendStringInfoString(ret, separator);
 
-		elog(DEBUG2, "%s: %s", __func__, ret->data);
-		return ret->data;
+		if (prefix)
+			appendStringInfoString(ret, prefix);
+		appendStringInfoString(ret, quote_identifier(grpcol));
 	}
 
-	return NULL;
+	elog(DEBUG2, "%s: %s", __func__, ret->data);
+	return ret->data;
 }
 
 static char *
@@ -286,27 +283,24 @@ build_merge_join_clause(List *column_names)
 {
 	StringInfo ret = makeStringInfo();
 
-	if (column_names != NIL)
-	{
-		ListCell *lc;
-		foreach (lc, column_names)
-		{
-			char *column = (char *) lfirst(lc);
+	Assert(column_names != NIL);
 
-			if (ret->len > 0)
-				appendStringInfoString(ret, " AND ");
+	ListCell *lc;
+	foreach (lc, column_names)
+	{
+		char *column = (char *) lfirst(lc);
 
-			appendStringInfoString(ret, "P.");
-			appendStringInfoString(ret, quote_identifier(column));
-			appendStringInfoString(ret, " IS NOT DISTINCT FROM M.");
-			appendStringInfoString(ret, quote_identifier(column));
-		}
+		if (ret->len > 0)
+			appendStringInfoString(ret, " AND ");
 
-		elog(DEBUG2, "%s: %s", __func__, ret->data);
-		return ret->data;
+		appendStringInfoString(ret, "P.");
+		appendStringInfoString(ret, quote_identifier(column));
+		appendStringInfoString(ret, " IS NOT DISTINCT FROM M.");
+		appendStringInfoString(ret, quote_identifier(column));
 	}
 
-	return NULL;
+	elog(DEBUG2, "%s: %s", __func__, ret->data);
+	return ret->data;
 }
 
 static char *
@@ -314,26 +308,23 @@ build_merge_update_clause(List *column_names)
 {
 	StringInfo ret = makeStringInfo();
 
-	if (column_names != NIL)
-	{
-		ListCell *lc;
-		foreach (lc, column_names)
-		{
-			char *column = (char *) lfirst(lc);
+	Assert(column_names != NIL);
 
-			if (ret->len > 0)
-				appendStringInfoString(ret, ", ");
+	ListCell *lc;
+	foreach (lc, column_names)
+	{
+		char *column = (char *) lfirst(lc);
 
-			appendStringInfoString(ret, quote_identifier(column));
-			appendStringInfoString(ret, " = P.");
-			appendStringInfoString(ret, quote_identifier(column));
-		}
+		if (ret->len > 0)
+			appendStringInfoString(ret, ", ");
 
-		elog(DEBUG2, "%s: %s", __func__, ret->data);
-		return ret->data;
+		appendStringInfoString(ret, quote_identifier(column));
+		appendStringInfoString(ret, " = P.");
+		appendStringInfoString(ret, quote_identifier(column));
 	}
 
-	return NULL;
+	elog(DEBUG2, "%s: %s", __func__, ret->data);
+	return ret->data;
 }
 
 static void
@@ -342,9 +333,6 @@ spi_update_watermark(Hypertable *mat_ht, SchemaAndName materialization_table,
 					 Oid materialization_type, const char *const chunk_condition)
 {
 	int res;
-	int64 watermark;
-	bool isnull = true;
-	Datum maxdat;
 	StringInfo command = makeStringInfo();
 
 	appendStringInfo(command,
@@ -369,12 +357,15 @@ spi_update_watermark(Hypertable *mat_ht, SchemaAndName materialization_table,
 					 materialization_type);
 
 	if (SPI_processed > 0)
-		maxdat = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
-
-	if (!isnull)
 	{
-		watermark = ts_time_value_to_internal(maxdat, materialization_type);
-		ts_cagg_watermark_update(mat_ht, watermark, isnull, false);
+		bool isnull;
+		Datum maxdat = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
+
+		if (!isnull)
+		{
+			int64 watermark = ts_time_value_to_internal(maxdat, materialization_type);
+			ts_cagg_watermark_update(mat_ht, watermark, isnull, false);
+		}
 	}
 }
 
diff --git a/tsl/test/expected/cagg_query.out b/tsl/test/expected/cagg_query.out
index 0cd74a2e79f..8a0be3ea6bc 100644
--- a/tsl/test/expected/cagg_query.out
+++ b/tsl/test/expected/cagg_query.out
@@ -1,7 +1,15 @@
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
+-- Connect as superuser to use SET ROLE later
+\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
+-- Run tests with default role
+SET ROLE :ROLE_DEFAULT_PERM_USER;
 \set TEST_BASE_NAME cagg_query
+\ir include/cagg_query_common.sql
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
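For context, the three helpers changed above assemble the pieces of the MERGE statement the refresh path can emit instead of the usual delete-plus-insert pass: build_merge_insert_columns() renders a column list (optionally prefixed), build_merge_join_clause() renders a NULL-safe match on the group-by columns using IS NOT DISTINCT FROM, and build_merge_update_clause() renders the SET list. A minimal sketch of the resulting statement shape, assuming the mat_m1 aggregate defined later in these tests; the M/P aliases come from build_merge_join_clause(), while the materialization table name and window bounds are schematic, not taken verbatim from the patch:

MERGE INTO _timescaledb_internal._materialized_hypertable_2 M
USING (
    -- freshly computed partials for the refresh window (bounds schematic)
    SELECT location, time_bucket('1day', timec) AS timec,
           min(location) AS minl, sum(temperature) AS sumt, sum(humidity) AS sumh
    FROM conditions
    WHERE timec >= '<window start>' AND timec < '<window end>'
    GROUP BY 1, 2
) P
-- build_merge_join_clause(): NULL-safe equality on every group-by column
ON P.timec IS NOT DISTINCT FROM M.timec
   AND P.location IS NOT DISTINCT FROM M.location
-- build_merge_update_clause(): "col = P.col" for each materialized column
WHEN MATCHED THEN
    UPDATE SET minl = P.minl, sumt = P.sumt, sumh = P.sumh
-- build_merge_insert_columns(): plain and "P."-prefixed column lists
WHEN NOT MATCHED THEN
    INSERT (location, timec, minl, sumt, sumh)
    VALUES (P.location, P.timec, P.minl, P.sumt, P.sumh);

The IS NOT DISTINCT FROM comparison is what lets rows whose group keys contain NULLs still match their previously materialized counterparts, which plain equality would not.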
SELECT format('%s/results/%s_results_view.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW", format('%s/results/%s_results_view_hashagg.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW_HASHAGG", @@ -752,7 +760,7 @@ CREATE TABLE temperature_wo_tz ( value float ); SELECT create_hypertable('temperature_wo_tz', 'time'); -WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/cagg_query_common.sql:316: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices create_hypertable -------------------------------- (5,public,temperature_wo_tz,t) @@ -784,21 +792,21 @@ CREATE TABLE table_bigint ( data bigint ); SELECT create_hypertable('table_smallint', 'time', chunk_time_interval => 10); -NOTICE: adding not-null constraint to column "time" +psql:include/cagg_query_common.sql:345: NOTICE: adding not-null constraint to column "time" create_hypertable ----------------------------- (7,public,table_smallint,t) (1 row) SELECT create_hypertable('table_int', 'time', chunk_time_interval => 10); -NOTICE: adding not-null constraint to column "time" +psql:include/cagg_query_common.sql:346: NOTICE: adding not-null constraint to column "time" create_hypertable ------------------------ (8,public,table_int,t) (1 row) SELECT create_hypertable('table_bigint', 'time', chunk_time_interval => 10); -NOTICE: adding not-null constraint to column "time" +psql:include/cagg_query_common.sql:347: NOTICE: adding not-null constraint to column "time" create_hypertable --------------------------- (9,public,table_bigint,t) @@ -839,7 +847,7 @@ CREATE MATERIALIZED VIEW cagg_4_hours SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours" +psql:include/cagg_query_common.sql:372: NOTICE: refreshing continuous aggregate "cagg_4_hours" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+----------------+-------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -847,13 +855,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours; -NOTICE: drop cascades to table _timescaledb_internal._hyper_10_14_chunk +psql:include/cagg_query_common.sql:374: NOTICE: drop cascades to table _timescaledb_internal._hyper_10_14_chunk CREATE MATERIALIZED VIEW cagg_4_hours_offset WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_offset" +psql:include/cagg_query_common.sql:380: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+---------------------+----------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -861,13 +869,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset'; (1 row) DROP MATERIALIZED VIEW 
cagg_4_hours_offset; -NOTICE: drop cascades to table _timescaledb_internal._hyper_11_15_chunk +psql:include/cagg_query_common.sql:382: NOTICE: drop cascades to table _timescaledb_internal._hyper_11_15_chunk CREATE MATERIALIZED VIEW cagg_4_hours_offset2 WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_offset2" +psql:include/cagg_query_common.sql:388: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset2" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset2'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+----------------------+----------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -875,14 +883,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset2'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_offset2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_12_16_chunk +psql:include/cagg_query_common.sql:390: NOTICE: drop cascades to table _timescaledb_internal._hyper_12_16_chunk -- Variable buckets (timezone is provided) with offset CREATE MATERIALIZED VIEW cagg_4_hours_offset_ts WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, timezone=>'UTC'), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_offset_ts" +psql:include/cagg_query_common.sql:397: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset_ts" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_ts'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+------------------------+---------------------------------------------------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -890,13 +898,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_ts'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_offset_ts; -NOTICE: drop cascades to table _timescaledb_internal._hyper_13_17_chunk +psql:include/cagg_query_common.sql:399: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_17_chunk CREATE MATERIALIZED VIEW cagg_4_hours_origin WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin" +psql:include/cagg_query_common.sql:405: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+---------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -904,14 +912,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin'; (1 row) DROP MATERIALIZED 
VIEW cagg_4_hours_origin; -NOTICE: drop cascades to table _timescaledb_internal._hyper_14_18_chunk +psql:include/cagg_query_common.sql:407: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_18_chunk -- Using named parameter CREATE MATERIALIZED VIEW cagg_4_hours_origin2 WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin2" +psql:include/cagg_query_common.sql:414: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin2" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin2'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+----------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -919,14 +927,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin2'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_origin2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_15_19_chunk +psql:include/cagg_query_common.sql:416: NOTICE: drop cascades to table _timescaledb_internal._hyper_15_19_chunk -- Variable buckets (timezone is provided) with origin CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts" +psql:include/cagg_query_common.sql:423: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -934,14 +942,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_origin_ts; -NOTICE: drop cascades to table _timescaledb_internal._hyper_16_20_chunk +psql:include/cagg_query_common.sql:425: NOTICE: drop cascades to table _timescaledb_internal._hyper_16_20_chunk -- Without named parameter CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts2 WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, 'UTC', '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts2" +psql:include/cagg_query_common.sql:432: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts2" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts2'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width 
------------------+-------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -949,14 +957,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts2'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_origin_ts2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_17_21_chunk +psql:include/cagg_query_common.sql:434: NOTICE: drop cascades to table _timescaledb_internal._hyper_17_21_chunk -- Timestamp based CAggs CREATE MATERIALIZED VIEW cagg_4_hours_wo_tz WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time), max(value) FROM temperature_wo_tz GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_wo_tz" +psql:include/cagg_query_common.sql:441: NOTICE: refreshing continuous aggregate "cagg_4_hours_wo_tz" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_wo_tz'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+--------------------+----------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -968,7 +976,7 @@ CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00'::timestamp), max(value) FROM temperature_wo_tz GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts_wo_tz" +psql:include/cagg_query_common.sql:448: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts_wo_tz" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+------------------------------+--------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -976,14 +984,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz; -NOTICE: drop cascades to table _timescaledb_internal._hyper_19_23_chunk +psql:include/cagg_query_common.sql:450: NOTICE: drop cascades to table _timescaledb_internal._hyper_19_23_chunk -- Variable buckets (timezone is provided) with origin CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2 WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00'::timestamp), max(value) FROM temperature_wo_tz GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts_wo_tz2" +psql:include/cagg_query_common.sql:457: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts_wo_tz2" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz2'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+-------------------------------+--------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -991,13 +999,13 @@ SELECT * 
FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz2'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_20_24_chunk +psql:include/cagg_query_common.sql:459: NOTICE: drop cascades to table _timescaledb_internal._hyper_20_24_chunk CREATE MATERIALIZED VIEW cagg_4_hours_offset_wo_tz WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) FROM temperature_wo_tz GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_offset_wo_tz" +psql:include/cagg_query_common.sql:465: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset_wo_tz" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_wo_tz'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+---------------------------+-------------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1005,16 +1013,16 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_wo_tz'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_offset_wo_tz; -NOTICE: drop cascades to table _timescaledb_internal._hyper_21_25_chunk +psql:include/cagg_query_common.sql:467: NOTICE: drop cascades to table _timescaledb_internal._hyper_21_25_chunk DROP MATERIALIZED VIEW cagg_4_hours_wo_tz; -NOTICE: drop cascades to table _timescaledb_internal._hyper_18_22_chunk +psql:include/cagg_query_common.sql:468: NOTICE: drop cascades to table _timescaledb_internal._hyper_18_22_chunk -- Date based CAggs CREATE MATERIALIZED VIEW cagg_4_hours_date WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 days', time), max(value) FROM temperature_date GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_date" +psql:include/cagg_query_common.sql:475: NOTICE: refreshing continuous aggregate "cagg_4_hours_date" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+-------------------+----------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1022,13 +1030,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_date; -NOTICE: drop cascades to table _timescaledb_internal._hyper_22_26_chunk +psql:include/cagg_query_common.sql:477: NOTICE: drop cascades to table _timescaledb_internal._hyper_22_26_chunk CREATE MATERIALIZED VIEW cagg_4_hours_date_origin WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 days', time, '2000-01-01'::date), max(value) FROM temperature_date GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_date_origin" +psql:include/cagg_query_common.sql:483: NOTICE: refreshing continuous aggregate "cagg_4_hours_date_origin" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width 
------------------+--------------------------+--------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -1036,13 +1044,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_date_origin; -NOTICE: drop cascades to table _timescaledb_internal._hyper_23_27_chunk +psql:include/cagg_query_common.sql:485: NOTICE: drop cascades to table _timescaledb_internal._hyper_23_27_chunk CREATE MATERIALIZED VIEW cagg_4_hours_date_origin2 WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 days', time, origin=>'2000-01-01'::date), max(value) FROM temperature_date GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_date_origin2" +psql:include/cagg_query_common.sql:491: NOTICE: refreshing continuous aggregate "cagg_4_hours_date_origin2" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin2'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+---------------------------+--------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -1050,13 +1058,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin2'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_date_origin2; -NOTICE: drop cascades to table _timescaledb_internal._hyper_24_28_chunk +psql:include/cagg_query_common.sql:493: NOTICE: drop cascades to table _timescaledb_internal._hyper_24_28_chunk CREATE MATERIALIZED VIEW cagg_4_hours_date_offset WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 days', time, "offset"=>'30m'::interval), max(value) FROM temperature_date GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_date_offset" +psql:include/cagg_query_common.sql:499: NOTICE: refreshing continuous aggregate "cagg_4_hours_date_offset" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_offset'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+--------------------------+-------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1064,14 +1072,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_offset'; (1 row) DROP MATERIALIZED VIEW cagg_4_hours_date_offset; -NOTICE: drop cascades to table _timescaledb_internal._hyper_25_29_chunk +psql:include/cagg_query_common.sql:501: NOTICE: drop cascades to table _timescaledb_internal._hyper_25_29_chunk -- Integer based CAggs CREATE MATERIALIZED VIEW cagg_smallint WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS SELECT time_bucket('2', time), SUM(data) as value FROM table_smallint GROUP BY 1; -NOTICE: refreshing continuous aggregate "cagg_smallint" +psql:include/cagg_query_common.sql:508: NOTICE: refreshing continuous aggregate "cagg_smallint" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width 
------------------+----------------+---------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1079,13 +1087,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint'; (1 row) DROP MATERIALIZED VIEW cagg_smallint; -NOTICE: drop cascades to table _timescaledb_internal._hyper_26_30_chunk +psql:include/cagg_query_common.sql:510: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_30_chunk CREATE MATERIALIZED VIEW cagg_smallint_offset WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS SELECT time_bucket('2', time, "offset"=>1::smallint), SUM(data) as value FROM table_smallint GROUP BY 1; -NOTICE: refreshing continuous aggregate "cagg_smallint_offset" +psql:include/cagg_query_common.sql:516: NOTICE: refreshing continuous aggregate "cagg_smallint_offset" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint_offset'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+----------------------+------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1093,13 +1101,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint_offset'; (1 row) DROP MATERIALIZED VIEW cagg_smallint_offset; -NOTICE: drop cascades to table _timescaledb_internal._hyper_27_31_chunk +psql:include/cagg_query_common.sql:518: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_31_chunk CREATE MATERIALIZED VIEW cagg_int WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS SELECT time_bucket('2', time), SUM(data) as value FROM table_int GROUP BY 1; -NOTICE: refreshing continuous aggregate "cagg_int" +psql:include/cagg_query_common.sql:524: NOTICE: refreshing continuous aggregate "cagg_int" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+----------------+-------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1107,13 +1115,13 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int'; (1 row) DROP MATERIALIZED VIEW cagg_int; -NOTICE: drop cascades to table _timescaledb_internal._hyper_28_32_chunk +psql:include/cagg_query_common.sql:526: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_32_chunk CREATE MATERIALIZED VIEW cagg_int_offset WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS SELECT time_bucket('2', time, "offset"=>1::int), SUM(data) as value FROM table_int GROUP BY 1; -NOTICE: refreshing continuous aggregate "cagg_int_offset" +psql:include/cagg_query_common.sql:532: NOTICE: refreshing continuous aggregate "cagg_int_offset" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int_offset'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+-----------------+---------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -1121,7 +1129,7 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int_offset'; (1 row) DROP MATERIALIZED VIEW cagg_int_offset; -NOTICE: drop cascades to table _timescaledb_internal._hyper_29_33_chunk 
+psql:include/cagg_query_common.sql:534: NOTICE: drop cascades to table _timescaledb_internal._hyper_29_33_chunk CREATE MATERIALIZED VIEW cagg_bigint WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS SELECT time_bucket('2', time), SUM(data) as value @@ -1159,16 +1167,16 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset2'; (1 row) -- mess with the bucket_func signature to make sure it will raise an exception -\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +SET ROLE :ROLE_CLUSTER_SUPERUSER; \set ON_ERROR_STOP 0 BEGIN; UPDATE _timescaledb_catalog.continuous_aggs_bucket_function SET bucket_func = 'func_does_not_exist()'; -- should error because function does not exist CALL refresh_continuous_aggregate('cagg_bigint_offset2', NULL, NULL); -ERROR: function "func_does_not_exist()" does not exist +psql:include/cagg_query_common.sql:566: ERROR: function "func_does_not_exist()" does not exist ROLLBACK; \set ON_ERROR_STOP 1 -\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SET ROLE :ROLE_DEFAULT_PERM_USER; DROP MATERIALIZED VIEW cagg_bigint_offset2; -- Test invalid bucket definitions \set ON_ERROR_STOP 0 @@ -1178,14 +1186,14 @@ CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -ERROR: function time_bucket(unknown, timestamp with time zone, offset => interval, origin => timestamp with time zone) does not exist at character 140 +psql:include/cagg_query_common.sql:580: ERROR: function time_bucket(unknown, timestamp with time zone, offset => interval, origin => timestamp with time zone) does not exist at character 140 -- Offset and origin at the same time is not allowed (function does exists but invalid parameter combination) CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -ERROR: using offset and origin in a time_bucket function at the same time is not supported +psql:include/cagg_query_common.sql:587: ERROR: using offset and origin in a time_bucket function at the same time is not supported \set ON_ERROR_STOP 1 --- -- Tests with CAgg processing @@ -1212,20 +1220,20 @@ CREATE MATERIALIZED VIEW cagg_4_hours SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours" +psql:include/cagg_query_common.sql:613: NOTICE: refreshing continuous aggregate "cagg_4_hours" CREATE MATERIALIZED VIEW cagg_4_hours_offset WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_offset" +psql:include/cagg_query_common.sql:619: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset" -- Align origin with first value CREATE MATERIALIZED VIEW cagg_4_hours_origin WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_4_hours_origin" +psql:include/cagg_query_common.sql:626: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin" -- 
Query the CAggs and check that all buckets are materialized SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; time_bucket | max @@ -1534,24 +1542,24 @@ SELECT * FROM cagg_4_hours_origin; -- Update materialized data SET client_min_messages TO DEBUG1; CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Thu Jan 02 00:00:00 2020 PST, Thu Jan 02 12:00:00 2020 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" -LOG: inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:683: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); +psql:include/cagg_query_common.sql:683: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Thu Jan 02 00:00:00 2020 PST, Thu Jan 02 12:00:00 2020 PST ] +psql:include/cagg_query_common.sql:683: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:683: LOG: inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577995200000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Wed Jan 01 20:30:00 2020 PST, Thu Jan 02 12:30:00 2020 PST ] -LOG: deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" -LOG: inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:684: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); +psql:include/cagg_query_common.sql:684: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577995200000000 +psql:include/cagg_query_common.sql:684: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Wed Jan 01 20:30:00 2020 PST, Thu Jan 02 12:30:00 2020 PST ] +psql:include/cagg_query_common.sql:684: LOG: deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:684: LOG: inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); -LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); -DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577995200000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Wed Jan 01 21:00:00 2020 PST, Thu Jan 02 13:00:00 2020 PST ] -LOG: deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" -LOG: inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:685: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); 
+psql:include/cagg_query_common.sql:685: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577995200000000 +psql:include/cagg_query_common.sql:685: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Wed Jan 01 21:00:00 2020 PST, Thu Jan 02 13:00:00 2020 PST ] +psql:include/cagg_query_common.sql:685: LOG: deleted 1 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:685: LOG: inserted 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" RESET client_min_messages; -LOG: statement: RESET client_min_messages; +psql:include/cagg_query_common.sql:686: LOG: statement: RESET client_min_messages; -- Query the CAggs and check that all buckets are materialized SELECT * FROM cagg_4_hours; time_bucket | max @@ -1754,40 +1762,40 @@ INSERT INTO temperature values('2020-01-02 01:35:00+01', 5555); INSERT INTO temperature values('2020-01-02 05:05:00+01', 8888); SET client_min_messages TO DEBUG1; CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Sat Jan 01 00:00:00 2000 PST, Sun Jan 02 00:00:00 2000 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" -LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" -DEBUG: hypertable 33 existing watermark >= new watermark 1577995200000000 946800000000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Wed Jan 01 00:00:00 2020 PST, Thu Jan 02 00:00:00 2020 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" -LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" -DEBUG: hypertable 33 existing watermark >= new watermark 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:725: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); +psql:include/cagg_query_common.sql:725: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:725: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Sat Jan 01 00:00:00 2000 PST, Sun Jan 02 00:00:00 2000 PST ] +psql:include/cagg_query_common.sql:725: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: DEBUG: hypertable 33 existing watermark >= new watermark 1577995200000000 946800000000000 +psql:include/cagg_query_common.sql:725: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Wed Jan 01 00:00:00 2020 PST, Thu Jan 02 00:00:00 2020 PST ] +psql:include/cagg_query_common.sql:725: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: LOG: inserted 6 row(s) into materialization table 
"_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: DEBUG: hypertable 33 existing watermark >= new watermark 1577995200000000 1577952000000000 CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Fri Dec 31 20:30:00 1999 PST, Sun Jan 02 00:30:00 2000 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" -LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" -DEBUG: hypertable 34 existing watermark >= new watermark 1577997000000000 946801800000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Tue Dec 31 20:30:00 2019 PST, Thu Jan 02 00:30:00 2020 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" -LOG: inserted 7 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" -DEBUG: hypertable 34 existing watermark >= new watermark 1577997000000000 1577953800000000 +psql:include/cagg_query_common.sql:726: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); +psql:include/cagg_query_common.sql:726: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:726: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Fri Dec 31 20:30:00 1999 PST, Sun Jan 02 00:30:00 2000 PST ] +psql:include/cagg_query_common.sql:726: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: DEBUG: hypertable 34 existing watermark >= new watermark 1577997000000000 946801800000000 +psql:include/cagg_query_common.sql:726: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Tue Dec 31 20:30:00 2019 PST, Thu Jan 02 00:30:00 2020 PST ] +psql:include/cagg_query_common.sql:726: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: LOG: inserted 7 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: DEBUG: hypertable 34 existing watermark >= new watermark 1577997000000000 1577953800000000 CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); -LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); -DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Fri Dec 31 21:00:00 1999 PST, Sun Jan 02 01:00:00 2000 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" -LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" -DEBUG: hypertable 35 existing watermark >= new watermark 
1577998800000000 946803600000000 -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Tue Dec 31 21:00:00 2019 PST, Thu Jan 02 01:00:00 2020 PST ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" -LOG: inserted 7 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" -DEBUG: hypertable 35 existing watermark >= new watermark 1577998800000000 1577955600000000 +psql:include/cagg_query_common.sql:727: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); +psql:include/cagg_query_common.sql:727: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:727: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Fri Dec 31 21:00:00 1999 PST, Sun Jan 02 01:00:00 2000 PST ] +psql:include/cagg_query_common.sql:727: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: LOG: inserted 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: DEBUG: hypertable 35 existing watermark >= new watermark 1577998800000000 946803600000000 +psql:include/cagg_query_common.sql:727: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Tue Dec 31 21:00:00 2019 PST, Thu Jan 02 01:00:00 2020 PST ] +psql:include/cagg_query_common.sql:727: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: LOG: inserted 7 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: DEBUG: hypertable 35 existing watermark >= new watermark 1577998800000000 1577955600000000 RESET client_min_messages; -LOG: statement: RESET client_min_messages; +psql:include/cagg_query_common.sql:728: LOG: statement: RESET client_min_messages; ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); SELECT * FROM cagg_4_hours; time_bucket | max @@ -1983,7 +1991,7 @@ CREATE MATERIALIZED VIEW cagg_1_year SELECT time_bucket('1 year', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_year" +psql:include/cagg_query_common.sql:766: NOTICE: refreshing continuous aggregate "cagg_1_year" SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3; materialization_id | lowest_modified_value | greatest_modified_value --------------------+-----------------------+------------------------- @@ -2015,13 +2023,13 @@ CREATE MATERIALIZED VIEW cagg_int AS SELECT time_bucket('10', time), SUM(data) as value FROM table_int GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_int" +psql:include/cagg_query_common.sql:783: NOTICE: refreshing continuous aggregate "cagg_int" CREATE MATERIALIZED VIEW cagg_int_offset WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('10', time, "offset"=>5), SUM(data) as value FROM table_int GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_int_offset" +psql:include/cagg_query_common.sql:789: NOTICE: refreshing continuous aggregate "cagg_int_offset" -- Compare bucketing results SELECT time_bucket('10', time), SUM(data) 
FROM table_int GROUP BY 1 ORDER BY 1; time_bucket | sum @@ -2236,14 +2244,14 @@ SELECT * FROM cagg_int_offset; INSERT INTO table_int VALUES(114, 0); SET client_min_messages TO DEBUG1; CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130); -LOG: statement: CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130); -DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_int_offset" in window [ 105, 135 ] -LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_38" -DEBUG: building index "_hyper_38_67_chunk__materialized_hypertable_38_time_bucket_idx" on table "_hyper_38_67_chunk" serially -DEBUG: index "_hyper_38_67_chunk__materialized_hypertable_38_time_bucket_idx" can safely use deduplication -LOG: inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_38" +psql:include/cagg_query_common.sql:824: LOG: statement: CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130); +psql:include/cagg_query_common.sql:824: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_int_offset" in window [ 105, 135 ] +psql:include/cagg_query_common.sql:824: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_38" +psql:include/cagg_query_common.sql:824: DEBUG: building index "_hyper_38_67_chunk__materialized_hypertable_38_time_bucket_idx" on table "_hyper_38_67_chunk" serially +psql:include/cagg_query_common.sql:824: DEBUG: index "_hyper_38_67_chunk__materialized_hypertable_38_time_bucket_idx" can safely use deduplication +psql:include/cagg_query_common.sql:824: LOG: inserted 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_38" RESET client_min_messages; -LOG: statement: RESET client_min_messages; +psql:include/cagg_query_common.sql:825: LOG: statement: RESET client_min_messages; SELECT * FROM cagg_int_offset; time_bucket | value -------------+------- @@ -2286,7 +2294,7 @@ CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin SELECT time_bucket('1 year', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin" +psql:include/cagg_query_common.sql:835: NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+------------------------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -2294,14 +2302,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fix (1 row) DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin; -NOTICE: drop cascades to 2 other objects +psql:include/cagg_query_common.sql:837: NOTICE: drop cascades to 2 other objects -- Variable due to the used timezone CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2 WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value FROM temperature GROUP BY 1 
ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin2" +psql:include/cagg_query_common.sql:844: NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin2" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin2'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+-------------------------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -2309,14 +2317,14 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fix (1 row) DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2; -NOTICE: drop cascades to 2 other objects +psql:include/cagg_query_common.sql:846: NOTICE: drop cascades to 2 other objects -- Variable with offset CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3 WITH (timescaledb.continuous) AS SELECT time_bucket('1 year', time, "offset"=>'5 minutes'::interval) AS hour_bucket, max(value) AS max_value FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin3" +psql:include/cagg_query_common.sql:853: NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin3" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin3'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+-------------------------------------------+----------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- @@ -2324,7 +2332,7 @@ SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fix (1 row) DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3; -NOTICE: drop cascades to 2 other objects +psql:include/cagg_query_common.sql:855: NOTICE: drop cascades to 2 other objects --- -- Test with blocking a few broken configurations --- @@ -2337,46 +2345,46 @@ CREATE MATERIALIZED VIEW cagg_1_hour_origin SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_hour_origin" +psql:include/cagg_query_common.sql:870: NOTICE: refreshing continuous aggregate "cagg_1_hour_origin" CREATE MATERIALIZED VIEW cagg_1_week_origin WITH (timescaledb.continuous) AS SELECT time_bucket('1 week', hour_bucket, origin=>'2022-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value FROM cagg_1_hour_origin GROUP BY 1 ORDER BY 1; -ERROR: cannot create continuous aggregate with different bucket origin values +psql:include/cagg_query_common.sql:876: ERROR: cannot create continuous aggregate with different bucket origin values -- Different time offset CREATE MATERIALIZED VIEW cagg_1_hour_offset WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', time, "offset"=>'30m'::interval) AS hour_bucket, max(value) AS max_value FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_hour_offset" +psql:include/cagg_query_common.sql:883: NOTICE: refreshing continuous aggregate 
"cagg_1_hour_offset" CREATE MATERIALIZED VIEW cagg_1_week_offset WITH (timescaledb.continuous) AS SELECT time_bucket('1 week', hour_bucket, "offset"=>'35m'::interval) AS week_bucket, max(max_value) AS max_value FROM cagg_1_hour_offset GROUP BY 1 ORDER BY 1; -ERROR: cannot create continuous aggregate with different bucket offset values +psql:include/cagg_query_common.sql:889: ERROR: cannot create continuous aggregate with different bucket offset values -- Different integer offset CREATE MATERIALIZED VIEW cagg_int_offset_5 WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('10', time, "offset"=>5) AS time, SUM(data) AS value FROM table_int GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_int_offset_5" +psql:include/cagg_query_common.sql:896: NOTICE: refreshing continuous aggregate "cagg_int_offset_5" CREATE MATERIALIZED VIEW cagg_int_offset_10 WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('10', time, "offset"=>10) AS time, SUM(value) AS value FROM cagg_int_offset_5 GROUP BY 1 ORDER BY 1; -ERROR: cannot create continuous aggregate with different bucket offset values +psql:include/cagg_query_common.sql:902: ERROR: cannot create continuous aggregate with different bucket offset values \set ON_ERROR_STOP 1 DROP MATERIALIZED VIEW cagg_1_hour_origin; -NOTICE: drop cascades to 2 other objects +psql:include/cagg_query_common.sql:906: NOTICE: drop cascades to 2 other objects DROP MATERIALIZED VIEW cagg_1_hour_offset; -NOTICE: drop cascades to 2 other objects +psql:include/cagg_query_common.sql:907: NOTICE: drop cascades to 2 other objects DROP MATERIALIZED VIEW cagg_int_offset_5; -NOTICE: drop cascades to 3 other objects +psql:include/cagg_query_common.sql:908: NOTICE: drop cascades to 3 other objects --- -- CAGGs on CAGGs tests --- @@ -2385,7 +2393,7 @@ CREATE MATERIALIZED VIEW cagg_1_hour_offset SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value FROM temperature GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_hour_offset" +psql:include/cagg_query_common.sql:917: NOTICE: refreshing continuous aggregate "cagg_1_hour_offset" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_offset'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+--------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- @@ -2397,7 +2405,7 @@ CREATE MATERIALIZED VIEW cagg_1_week_offset SELECT time_bucket('1 week', hour_bucket, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value FROM cagg_1_hour_offset GROUP BY 1 ORDER BY 1; -NOTICE: refreshing continuous aggregate "cagg_1_week_offset" +psql:include/cagg_query_common.sql:924: NOTICE: refreshing continuous aggregate "cagg_1_week_offset" SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_week_offset'; user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width ------------------+--------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- diff --git 
a/tsl/test/expected/cagg_query_using_merge.out b/tsl/test/expected/cagg_query_using_merge.out new file mode 100644 index 00000000000..3fc68c1fd78 --- /dev/null +++ b/tsl/test/expected/cagg_query_using_merge.out @@ -0,0 +1,2489 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Connect as superuser to use SET ROLE later +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +-- Run tests with default role +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- Enable MERGE statements for continuous aggregate refresh +SET timescaledb.enable_merge_on_cagg_refresh TO ON; +\set TEST_BASE_NAME cagg_query_using_merge +\ir include/cagg_query_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +SELECT + format('%s/results/%s_results_view.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW", + format('%s/results/%s_results_view_hashagg.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW_HASHAGG", + format('%s/results/%s_results_table.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_TABLE" +\gset +SELECT format('\! diff %s %s', :'TEST_RESULTS_VIEW', :'TEST_RESULTS_TABLE') as "DIFF_CMD", + format('\! diff %s %s', :'TEST_RESULTS_VIEW_HASHAGG', :'TEST_RESULTS_TABLE') as "DIFF_CMD2" +\gset +\set EXPLAIN 'EXPLAIN (VERBOSE, COSTS OFF)' +SET client_min_messages TO NOTICE; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec'); + table_name +------------ + conditions +(1 row) + +insert into conditions values ( '2018-01-01 09:20:00-08', 'SFO', 55, 45); +insert into conditions values ( '2018-01-02 09:30:00-08', 'por', 100, 100); +insert into conditions values ( '2018-01-02 09:20:00-08', 'SFO', 65, 45); +insert into conditions values ( '2018-01-02 09:10:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-11-01 09:20:00-08', 'NYC', 45, 30); +insert into conditions values ( '2018-11-01 10:40:00-08', 'NYC', 55, 35); +insert into conditions values ( '2018-11-01 11:50:00-08', 'NYC', 65, 40); +insert into conditions values ( '2018-11-01 12:10:00-08', 'NYC', 75, 45); +insert into conditions values ( '2018-11-01 13:10:00-08', 'NYC', 85, 50); +insert into conditions values ( '2018-11-02 09:20:00-08', 'NYC', 10, 10); +insert into conditions values ( '2018-11-02 10:30:00-08', 'NYC', 20, 15); +insert into conditions values ( '2018-11-02 11:40:00-08', 'NYC', null, null); +insert into conditions values ( '2018-11-03 09:50:00-08', 'NYC', null, null); +create table location_tab( locid integer, locname text ); +insert into location_tab values( 1, 'SFO'); +insert into location_tab values( 2, 'NYC'); +insert into location_tab values( 3, 'por'); +create materialized view mat_m1( location, timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location WITH NO DATA; +--compute time_bucketted max+bucket_width for the materialized view +SELECT time_bucket('1day' , q.timeval+ '1day'::interval) +FROM ( select max(timec)as timeval from conditions ) as q; + time_bucket 
+------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +CALL refresh_continuous_aggregate('mat_m1', NULL, NULL); +--test first/last +create materialized view mat_m2(location, timec, firsth, lasth, maxtemp, mintemp) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, time_bucket('1day', timec), first(humidity, timec), last(humidity, timec), max(temperature), min(temperature) +from conditions +group by time_bucket('1day', timec), location WITH NO DATA; +--time that refresh assumes as now() for repeatability +SELECT time_bucket('1day' , q.timeval+ '1day'::interval) +FROM ( select max(timec)as timeval from conditions ) as q; + time_bucket +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +CALL refresh_continuous_aggregate('mat_m2', NULL, NULL); +--normal view -- +create or replace view regview( location, timec, minl, sumt , sumh) +as +select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by location, time_bucket('1day', timec); +set enable_hashagg = false; +-- NO pushdown cases --- +--when we have addl. attrs in order by that are not in the +-- group by, we will still need a sort +:EXPLAIN +select * from mat_m1 order by sumh, sumt, minl, timec ; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Sort Key: _hyper_2_3_chunk.sumh, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.timec + -> Append + -> Append + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) +(22 rows) + 
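+-- Note: the plan above shows the real-time aggregation shape: the Append
+-- combines already-materialized rows below the watermark with a fresh
+-- GroupAggregate over raw chunks at or above it. A rough, hand-written
+-- sketch of what mat_m1 expands to (illustrative only; the actual view
+-- definition is generated internally, and hypertable id 2 is inferred
+-- from the chunk names in the plan):
+-- SELECT location, timec, minl, sumt, sumh
+-- FROM _timescaledb_internal._materialized_hypertable_2
+-- WHERE timec < _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2))
+-- UNION ALL
+-- SELECT location, time_bucket('1day', timec), min(location), sum(temperature), sum(humidity)
+-- FROM conditions
+-- WHERE timec >= _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2))
+-- GROUP BY 2, location;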
+:EXPLAIN +select * from regview order by timec desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (min(_hyper_1_1_chunk.location)), (sum(_hyper_1_1_chunk.temperature)), (sum(_hyper_1_1_chunk.humidity)) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) DESC + -> Finalize GroupAggregate + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), min(_hyper_1_1_chunk.location), sum(_hyper_1_1_chunk.temperature), sum(_hyper_1_1_chunk.humidity) + Group Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (PARTIAL min(_hyper_1_1_chunk.location)), (PARTIAL sum(_hyper_1_1_chunk.temperature)), (PARTIAL sum(_hyper_1_1_chunk.humidity)) + Sort Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Append + -> Partial GroupAggregate + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), PARTIAL min(_hyper_1_1_chunk.location), PARTIAL sum(_hyper_1_1_chunk.temperature), PARTIAL sum(_hyper_1_1_chunk.humidity) + Group Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + Sort Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + -> Partial GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), PARTIAL min(_hyper_1_2_chunk.location), PARTIAL sum(_hyper_1_2_chunk.temperature), PARTIAL sum(_hyper_1_2_chunk.humidity) + Group Key: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)) + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity +(26 rows) + +-- PUSHDOWN cases -- +-- all group by elts in order by , reorder group by elts to match +-- group by order +-- This should prevent an additional sort after GroupAggregate +:EXPLAIN +select * from mat_m1 order by timec desc, location; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Sort Key: _hyper_2_3_chunk.timec DESC, 
_hyper_2_3_chunk.location + -> Append + -> Append + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) +(22 rows) + +:EXPLAIN +select * from mat_m1 order by location, timec desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Sort Key: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec DESC + -> Append + -> Append + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, 
_hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) +(22 rows) + +:EXPLAIN +select * from mat_m1 order by location, timec asc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Sort Key: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec + -> Append + -> Append + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) +(22 rows) + +:EXPLAIN +select * from mat_m1 where timec > '2018-10-01' order by timec desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Sort Key: _hyper_2_4_chunk.timec DESC + -> Append + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, 
_hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: ((_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(19 rows) + +-- outer sort is used by mat_m1 for grouping. But doesn't avoid a sort after the join --- +:EXPLAIN +select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: l.locid, _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Sort Key: _hyper_2_4_chunk.timec DESC + -> Hash Join + Output: l.locid, _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Hash Cond: (l.locname = _hyper_2_4_chunk.location) + -> Seq Scan on public.location_tab l + Output: l.locid, l.locname + -> Hash + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + -> Append + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, 
_hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: ((_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(26 rows) + +:EXPLAIN +select * from mat_m2 where timec > '2018-10-01' order by timec desc; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Sort Key: _hyper_3_6_chunk.timec DESC + -> Append + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), first(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), last(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), max(_hyper_1_2_chunk.temperature), min(_hyper_1_2_chunk.temperature) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(19 rows) + +:EXPLAIN +select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc ) as q limit 1; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + -> Sort + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Sort Key: _hyper_3_6_chunk.timec DESC + -> Append + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), first(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), last(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), max(_hyper_1_2_chunk.temperature), min(_hyper_1_2_chunk.temperature) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(21 rows) + +:EXPLAIN +select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc , location asc nulls first) as q limit 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Limit + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + -> Sort + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Sort Key: _hyper_3_6_chunk.timec DESC, _hyper_3_6_chunk.location NULLS FIRST + -> Append + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on 
_timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), first(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), last(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), max(_hyper_1_2_chunk.temperature), min(_hyper_1_2_chunk.temperature) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(21 rows) + +--plans with CTE +:EXPLAIN +with m1 as ( +Select * from mat_m2 where timec > '2018-10-01' order by timec desc ) +select * from m1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Sort Key: _hyper_3_6_chunk.timec DESC + -> Append + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: ((_hyper_3_6_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_3_6_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), first(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), last(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), max(_hyper_1_2_chunk.temperature), min(_hyper_1_2_chunk.temperature) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + Sort Key: (time_bucket('@ 1 
day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: ((_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(19 rows) + +-- should reorder mat_m1 group by only based on mat_m1 order-by +:EXPLAIN +select * from mat_m1, mat_m2 where mat_m1.timec > '2018-10-01' and mat_m1.timec = mat_m2.timec order by mat_m1.timec desc; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Sort + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh, _hyper_3_5_chunk.location, _hyper_3_5_chunk.timec, _hyper_3_5_chunk.firsth, _hyper_3_5_chunk.lasth, _hyper_3_5_chunk.maxtemp, _hyper_3_5_chunk.mintemp + Sort Key: _hyper_2_4_chunk.timec DESC + -> Hash Join + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh, _hyper_3_5_chunk.location, _hyper_3_5_chunk.timec, _hyper_3_5_chunk.firsth, _hyper_3_5_chunk.lasth, _hyper_3_5_chunk.maxtemp, _hyper_3_5_chunk.mintemp + Hash Cond: (_hyper_3_5_chunk.timec = _hyper_2_4_chunk.timec) + -> Append + -> Append + -> Index Scan using _hyper_3_5_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_5_chunk + Output: _hyper_3_5_chunk.location, _hyper_3_5_chunk.timec, _hyper_3_5_chunk.firsth, _hyper_3_5_chunk.lasth, _hyper_3_5_chunk.maxtemp, _hyper_3_5_chunk.mintemp + Index Cond: (_hyper_3_5_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Index Scan using _hyper_3_6_chunk__materialized_hypertable_3_timec_idx on _timescaledb_internal._hyper_3_6_chunk + Output: _hyper_3_6_chunk.location, _hyper_3_6_chunk.timec, _hyper_3_6_chunk.firsth, _hyper_3_6_chunk.lasth, _hyper_3_6_chunk.maxtemp, _hyper_3_6_chunk.mintemp + Index Cond: (_hyper_3_6_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), first(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), last(_hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec), max(_hyper_1_2_chunk.temperature), min(_hyper_1_2_chunk.temperature) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, 
_hyper_1_2_chunk.timec), _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.humidity, _hyper_1_2_chunk.temperature + Index Cond: (_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Hash + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + -> Append + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk_1.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), min(_hyper_1_2_chunk_1.location), sum(_hyper_1_2_chunk_1.temperature), sum(_hyper_1_2_chunk_1.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), _hyper_1_2_chunk_1.location + -> Sort + Output: _hyper_1_2_chunk_1.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), _hyper_1_2_chunk_1.location + -> Result + Output: _hyper_1_2_chunk_1.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec), _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk _hyper_1_2_chunk_1 + Output: _hyper_1_2_chunk_1.location, _hyper_1_2_chunk_1.timec, _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + Index Cond: ((_hyper_1_2_chunk_1.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk_1.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(43 rows) + +--should reorder only for mat_m1. 
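+-- (Group-by reordering is a cagg-only optimization: the planner may reorder
+-- the cagg's GROUP BY keys to match the requested ORDER BY, while regview is
+-- planned as an ordinary view and keeps its original grouping. The GUC gating
+-- this optimization is exercised further below, e.g.:
+--   set timescaledb.enable_cagg_reorder_groupby = false;
+-- )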
+:EXPLAIN +select * from mat_m1, regview where mat_m1.timec > '2018-10-01' and mat_m1.timec = regview.timec order by mat_m1.timec desc; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh, _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (min(_hyper_1_1_chunk.location)), (sum(_hyper_1_1_chunk.temperature)), (sum(_hyper_1_1_chunk.humidity)) + Sort Key: _hyper_2_4_chunk.timec DESC + -> Hash Join + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh, _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (min(_hyper_1_1_chunk.location)), (sum(_hyper_1_1_chunk.temperature)), (sum(_hyper_1_1_chunk.humidity)) + Hash Cond: ((time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) = _hyper_2_4_chunk.timec) + -> Finalize GroupAggregate + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), min(_hyper_1_1_chunk.location), sum(_hyper_1_1_chunk.temperature), sum(_hyper_1_1_chunk.humidity) + Group Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), (PARTIAL min(_hyper_1_1_chunk.location)), (PARTIAL sum(_hyper_1_1_chunk.temperature)), (PARTIAL sum(_hyper_1_1_chunk.humidity)) + Sort Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Append + -> Partial GroupAggregate + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), PARTIAL min(_hyper_1_1_chunk.location), PARTIAL sum(_hyper_1_1_chunk.temperature), PARTIAL sum(_hyper_1_1_chunk.humidity) + Group Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Sort + Output: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + Sort Key: _hyper_1_1_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec)) + -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.timec), _hyper_1_1_chunk.temperature, _hyper_1_1_chunk.humidity + -> Partial GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), PARTIAL min(_hyper_1_2_chunk.location), PARTIAL sum(_hyper_1_2_chunk.temperature), PARTIAL sum(_hyper_1_2_chunk.humidity) + Group Key: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)) + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)) + -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, 
_hyper_1_2_chunk.humidity + -> Hash + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + -> Append + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: ((_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_2_4_chunk.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + -> GroupAggregate + Output: _hyper_1_2_chunk_1.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), min(_hyper_1_2_chunk_1.location), sum(_hyper_1_2_chunk_1.temperature), sum(_hyper_1_2_chunk_1.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), _hyper_1_2_chunk_1.location + -> Sort + Output: _hyper_1_2_chunk_1.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec)), _hyper_1_2_chunk_1.location + -> Result + Output: _hyper_1_2_chunk_1.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec), _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk _hyper_1_2_chunk_1 + Output: _hyper_1_2_chunk_1.location, _hyper_1_2_chunk_1.timec, _hyper_1_2_chunk_1.temperature, _hyper_1_2_chunk_1.humidity + Index Cond: ((_hyper_1_2_chunk_1.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) AND (_hyper_1_2_chunk_1.timec > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone)) + Filter: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk_1.timec) > 'Mon Oct 01 00:00:00 2018 PDT'::timestamp with time zone) +(47 rows) + +select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc; + locid | location | timec | minl | sumt | sumh +-------+----------+------------------------------+------+------+------ + 2 | NYC | Fri Nov 02 17:00:00 2018 PDT | NYC | | + 2 | NYC | Thu Nov 01 17:00:00 2018 PDT | NYC | 30 | 25 + 2 | NYC | Wed Oct 31 17:00:00 2018 PDT | NYC | 325 | 200 +(3 rows) + +\set ECHO none +---- Run the same queries with hash agg enabled now +set enable_hashagg = true; +\set ECHO none +--- Run the queries directly on the table now +set enable_hashagg = true; +\set ECHO none +-- diff results view select and table select +:DIFF_CMD +:DIFF_CMD2 +--check if the guc works , reordering will not work +set timescaledb.enable_cagg_reorder_groupby = false; +set enable_hashagg = false; +:EXPLAIN +select * from mat_m1 order by timec desc, location; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, _hyper_2_3_chunk.sumh + Sort Key: _hyper_2_3_chunk.timec DESC, _hyper_2_3_chunk.location + -> Append + -> Append + -> Index Scan using _hyper_2_3_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_3_chunk + Output: _hyper_2_3_chunk.location, _hyper_2_3_chunk.timec, _hyper_2_3_chunk.minl, _hyper_2_3_chunk.sumt, 
_hyper_2_3_chunk.sumh + Index Cond: (_hyper_2_3_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> Index Scan using _hyper_2_4_chunk__materialized_hypertable_2_timec_idx on _timescaledb_internal._hyper_2_4_chunk + Output: _hyper_2_4_chunk.location, _hyper_2_4_chunk.timec, _hyper_2_4_chunk.minl, _hyper_2_4_chunk.sumt, _hyper_2_4_chunk.sumh + Index Cond: (_hyper_2_4_chunk.timec < 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) + -> GroupAggregate + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), min(_hyper_1_2_chunk.location), sum(_hyper_1_2_chunk.temperature), sum(_hyper_1_2_chunk.humidity) + Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Sort + Output: _hyper_1_2_chunk.location, (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec)), _hyper_1_2_chunk.location + -> Result + Output: _hyper_1_2_chunk.location, time_bucket('@ 1 day'::interval, _hyper_1_2_chunk.timec), _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + -> Index Scan using _hyper_1_2_chunk_conditions_timec_idx on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.location, _hyper_1_2_chunk.timec, _hyper_1_2_chunk.temperature, _hyper_1_2_chunk.humidity + Index Cond: (_hyper_1_2_chunk.timec >= 'Sat Nov 03 17:00:00 2018 PDT'::timestamp with time zone) +(22 rows) + +----------------------------------------------------------------------- +-- Test the cagg_watermark function. The watermark gives the point +-- where to UNION raw and materialized data in real-time +-- aggregation. Specifically, test that the watermark caching works as +-- expected. +----------------------------------------------------------------------- +-- Insert some more data so that there is something to UNION in +-- real-time aggregation. +insert into conditions values ( '2018-12-02 20:10:00-08', 'SFO', 55, 45); +insert into conditions values ( '2018-12-02 21:20:00-08', 'SFO', 65, 45); +insert into conditions values ( '2018-12-02 20:30:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-12-02 21:50:00-08', 'NYC', 45, 30); +-- Test join of two caggs. Joining two caggs will force the cache to +-- reset every time the watermark function is invoked on a different +-- cagg in the same query. +SELECT mat_hypertable_id AS mat_id, + raw_hypertable_id AS raw_id, + schema_name AS mat_schema, + table_name AS mat_name, + format('%I.%I', schema_name, table_name) AS mat_table +FROM _timescaledb_catalog.continuous_agg ca, _timescaledb_catalog.hypertable h +WHERE user_view_name='mat_m1' +AND h.id = ca.mat_hypertable_id \gset +BEGIN; +-- Query without join +SELECT m1.location, m1.timec, sumt, sumh +FROM mat_m1 m1 +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + location | timec | sumt | sumh +----------+------------------------------+------+------ + NYC | Sun Dec 02 16:00:00 2018 PST | 110 | 75 + NYC | Fri Nov 02 17:00:00 2018 PDT | | + NYC | Thu Nov 01 17:00:00 2018 PDT | 30 | 25 + NYC | Wed Oct 31 17:00:00 2018 PDT | 325 | 200 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 + SFO | Sun Dec 02 16:00:00 2018 PST | 120 | 90 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 +(9 rows) + +-- Query that joins two caggs. 
This should force the watermark cache +-- to reset when the materialized hypertable ID changes. A hash join +-- could potentially read all values from mat_m1 then all values from +-- mat_m2. This would be the optimal situation for cagg_watermark +-- caching. We want to avoid it in tests to see that caching doesn't +-- do anything wrong in worse situations (e.g., a nested loop join). +SET enable_hashjoin=false; +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp +----------+------------------------------+------+------+--------+-------+---------+--------- + NYC | Sun Dec 02 16:00:00 2018 PST | 110 | 75 | 45 | 30 | 65 | 45 + NYC | Fri Nov 02 17:00:00 2018 PDT | | | | | | + NYC | Thu Nov 01 17:00:00 2018 PDT | 30 | 25 | 10 | | 20 | 10 + NYC | Wed Oct 31 17:00:00 2018 PDT | 325 | 200 | 30 | 50 | 85 | 45 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 02 16:00:00 2018 PST | 120 | 90 | 45 | 45 | 65 | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 | 45 | 45 | 55 | 55 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 +(9 rows) + +-- Show the current watermark +SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); + to_timestamp +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +-- The watermark should, in this case, be the same as the invalidation +-- threshold +SELECT _timescaledb_functions.to_timestamp(watermark) +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :raw_id; + to_timestamp +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +-- The watermark is the end of materialization (end of last bucket) +-- while the MAX is the start of the last bucket +SELECT max(timec) FROM :mat_table; + max +------------------------------ + Fri Nov 02 17:00:00 2018 PDT +(1 row) + +-- Drop the most recent chunk +SELECT chunk_name, range_start, range_end +FROM timescaledb_information.chunks +WHERE hypertable_name = :'mat_name'; + chunk_name | range_start | range_end +------------------+------------------------------+------------------------------ + _hyper_2_3_chunk | Wed Nov 29 16:00:00 2017 PST | Wed Feb 07 16:00:00 2018 PST + _hyper_2_4_chunk | Wed Sep 05 17:00:00 2018 PDT | Wed Nov 14 16:00:00 2018 PST +(2 rows) + +SELECT drop_chunks('mat_m1', newer_than=>'2018-01-01'::timestamptz); + drop_chunks +---------------------------------------- + _timescaledb_internal._hyper_2_4_chunk +(1 row) + +SELECT chunk_name, range_start, range_end +FROM timescaledb_information.chunks +WHERE hypertable_name = :'mat_name'; + chunk_name | range_start | range_end +------------------+------------------------------+------------------------------ + _hyper_2_3_chunk | Wed Nov 29 16:00:00 2017 PST | Wed Feb 07 16:00:00 2018 PST +(1 row) + +-- The watermark should be updated to reflect the dropped data (i.e., +-- the cache should be reset) +SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); + to_timestamp +------------------------------ + Tue Jan 02 16:00:00 2018 PST +(1 row) + +-- Since we removed the last chunk, the invalidation threshold doesn't +-- move back, while the watermark does. 
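+-- (Both values side by side, as an illustrative sketch reusing the psql
+-- variables captured above:
+-- SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)) AS watermark,
+--        _timescaledb_functions.to_timestamp(t.watermark) AS threshold
+-- FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold t
+-- WHERE t.hypertable_id = :raw_id;
+-- )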
+SELECT _timescaledb_functions.to_timestamp(watermark) +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :raw_id; + to_timestamp +------------------------------ + Sat Nov 03 17:00:00 2018 PDT +(1 row) + +-- Compare the new watermark to the MAX time in the table +SELECT max(timec) FROM :mat_table; + max +------------------------------ + Mon Jan 01 16:00:00 2018 PST +(1 row) + +-- Try a subtransaction +SAVEPOINT clear_cagg; +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp +----------+------------------------------+------+------+--------+-------+---------+--------- + NYC | Sun Dec 02 16:00:00 2018 PST | 110 | 75 | 45 | 30 | 65 | 45 + NYC | Fri Nov 02 17:00:00 2018 PDT | | | | | | + NYC | Thu Nov 01 17:00:00 2018 PDT | 30 | 25 | 10 | | 20 | 10 + NYC | Wed Oct 31 17:00:00 2018 PDT | 325 | 200 | 30 | 50 | 85 | 45 + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 02 16:00:00 2018 PST | 120 | 90 | 45 | 45 | 65 | 55 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 | 45 | 45 | 55 | 55 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 +(9 rows) + +ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST +LIMIT 10; + location | timec | sumt | sumh | firsth | lasth | maxtemp | mintemp +----------+------------------------------+------+------+--------+-------+---------+--------- + NYC | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 | 45 | 45 | 45 | 65 | 65 + SFO | Sun Dec 31 16:00:00 2017 PST | 55 | 45 | 45 | 45 | 55 | 55 + por | Mon Jan 01 16:00:00 2018 PST | 100 | 100 | 100 | 100 | 100 | 100 + | | | | 10 | | 20 | 10 + | | | | 30 | 50 | 85 | 45 + | | | | 45 | 30 | 65 | 45 + | | | | 45 | 45 | 65 | 55 + | | | | | | | +(9 rows) + +ROLLBACK; +----- +-- Tests with time_bucket and offset/origin +----- +CREATE TABLE temperature ( + time timestamptz NOT NULL, + value float +); +SELECT create_hypertable('temperature', 'time'); + create_hypertable +-------------------------- + (4,public,temperature,t) +(1 row) + +INSERT INTO temperature VALUES ('2000-01-01 01:00:00'::timestamptz, 5); +CREATE TABLE temperature_wo_tz ( + time timestamp NOT NULL, + value float +); +SELECT create_hypertable('temperature_wo_tz', 'time'); +psql:include/cagg_query_common.sql:316: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + create_hypertable +-------------------------------- + (5,public,temperature_wo_tz,t) +(1 row) + +INSERT INTO temperature_wo_tz VALUES ('2000-01-01 01:00:00'::timestamp, 5); +CREATE TABLE temperature_date ( + time date NOT NULL, + value float +); +SELECT create_hypertable('temperature_date', 'time'); + create_hypertable +------------------------------- + (6,public,temperature_date,t) +(1 row) + +INSERT INTO temperature_date VALUES ('2000-01-01 01:00:00'::timestamp, 5); +-- 
Integer based tables +CREATE TABLE table_smallint ( + time smallint, + data smallint +); +CREATE TABLE table_int ( + time int, + data int +); +CREATE TABLE table_bigint ( + time bigint, + data bigint +); +SELECT create_hypertable('table_smallint', 'time', chunk_time_interval => 10); +psql:include/cagg_query_common.sql:345: NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------- + (7,public,table_smallint,t) +(1 row) + +SELECT create_hypertable('table_int', 'time', chunk_time_interval => 10); +psql:include/cagg_query_common.sql:346: NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------ + (8,public,table_int,t) +(1 row) + +SELECT create_hypertable('table_bigint', 'time', chunk_time_interval => 10); +psql:include/cagg_query_common.sql:347: NOTICE: adding not-null constraint to column "time" + create_hypertable +--------------------------- + (9,public,table_bigint,t) +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_smallint() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_smallint $$; +CREATE OR REPLACE FUNCTION integer_now_int() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_int $$; +CREATE OR REPLACE FUNCTION integer_now_bigint() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_bigint $$; +SELECT set_integer_now_func('table_smallint', 'integer_now_smallint'); + set_integer_now_func +---------------------- + +(1 row) + +SELECT set_integer_now_func('table_int', 'integer_now_int'); + set_integer_now_func +---------------------- + +(1 row) + +SELECT set_integer_now_func('table_bigint', 'integer_now_bigint'); + set_integer_now_func +---------------------- + +(1 row) + +INSERT INTO table_smallint VALUES(1,2); +INSERT INTO table_int VALUES(1,2); +INSERT INTO table_bigint VALUES(1,2); +CREATE VIEW caggs_info AS +SELECT user_view_schema, user_view_name, bucket_func, bucket_width, bucket_origin, bucket_offset, bucket_timezone, bucket_fixed_width +FROM _timescaledb_catalog.continuous_aggs_bucket_function NATURAL JOIN _timescaledb_catalog.continuous_agg; +--- +-- Tests with CAgg creation +--- +CREATE MATERIALIZED VIEW cagg_4_hours + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:372: NOTICE: refreshing continuous aggregate "cagg_4_hours" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+----------------+-------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours | public.time_bucket(interval,timestamp with time zone) | @ 4 hours | | | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours; +psql:include/cagg_query_common.sql:374: NOTICE: drop cascades to table _timescaledb_internal._hyper_10_14_chunk +CREATE MATERIALIZED VIEW cagg_4_hours_offset + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '30m'::interval), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:380: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset" +SELECT * FROM caggs_info WHERE user_view_name = 
'cagg_4_hours_offset'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+---------------------+----------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours_offset | public.time_bucket(interval,timestamp with time zone,interval) | @ 4 hours | | @ 30 mins | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_offset; +psql:include/cagg_query_common.sql:382: NOTICE: drop cascades to table _timescaledb_internal._hyper_11_15_chunk +CREATE MATERIALIZED VIEW cagg_4_hours_offset2 + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:388: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset2" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset2'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+----------------------+----------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours_offset2 | public.time_bucket(interval,timestamp with time zone,interval) | @ 4 hours | | @ 30 mins | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_offset2; +psql:include/cagg_query_common.sql:390: NOTICE: drop cascades to table _timescaledb_internal._hyper_12_16_chunk +-- Variable buckets (timezone is provided) with offset +CREATE MATERIALIZED VIEW cagg_4_hours_offset_ts + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, timezone=>'UTC'), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:397: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset_ts" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_ts'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+------------------------+---------------------------------------------------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours_offset_ts | public.time_bucket(interval,timestamp with time zone,pg_catalog.text,timestamp with time zone,interval) | @ 4 hours | | @ 30 mins | UTC | f +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_offset_ts; +psql:include/cagg_query_common.sql:399: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_17_chunk +CREATE MATERIALIZED VIEW cagg_4_hours_origin + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:405: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width 
+------------------+---------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_4_hours_origin | public.time_bucket(interval,timestamp with time zone,timestamp with time zone) | @ 4 hours | Sat Jan 01 01:00:00 2000 PST | | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_origin; +psql:include/cagg_query_common.sql:407: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_18_chunk +-- Using named parameter +CREATE MATERIALIZED VIEW cagg_4_hours_origin2 + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:414: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin2" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin2'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+----------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_4_hours_origin2 | public.time_bucket(interval,timestamp with time zone,timestamp with time zone) | @ 4 hours | Sat Jan 01 01:00:00 2000 PST | | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_origin2; +psql:include/cagg_query_common.sql:416: NOTICE: drop cascades to table _timescaledb_internal._hyper_15_19_chunk +-- Variable buckets (timezone is provided) with origin +CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:423: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_4_hours_origin_ts | public.time_bucket(interval,timestamp with time zone,pg_catalog.text,timestamp with time zone,interval) | @ 4 hours | Sat Jan 01 01:00:00 2000 PST | | UTC | f +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_origin_ts; +psql:include/cagg_query_common.sql:425: NOTICE: drop cascades to table _timescaledb_internal._hyper_16_20_chunk +-- Without named parameter +CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts2 + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, 'UTC', '2000-01-01 01:00:00 PST'::timestamptz), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:432: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts2" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts2'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | 
bucket_offset | bucket_timezone | bucket_fixed_width +------------------+-------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_4_hours_origin_ts2 | public.time_bucket(interval,timestamp with time zone,pg_catalog.text,timestamp with time zone,interval) | @ 4 hours | Sat Jan 01 01:00:00 2000 PST | | UTC | f +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_origin_ts2; +psql:include/cagg_query_common.sql:434: NOTICE: drop cascades to table _timescaledb_internal._hyper_17_21_chunk +-- Timestamp based CAggs +CREATE MATERIALIZED VIEW cagg_4_hours_wo_tz + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time), max(value) + FROM temperature_wo_tz + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:441: NOTICE: refreshing continuous aggregate "cagg_4_hours_wo_tz" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_wo_tz'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+--------------------+----------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours_wo_tz | public.time_bucket(interval,timestamp without time zone) | @ 4 hours | | | | t +(1 row) + +CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00'::timestamp), max(value) + FROM temperature_wo_tz + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:448: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts_wo_tz" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+------------------------------+--------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_4_hours_origin_ts_wo_tz | public.time_bucket(interval,timestamp without time zone,timestamp without time zone) | @ 4 hours | Fri Dec 31 17:00:00 1999 PST | | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz; +psql:include/cagg_query_common.sql:450: NOTICE: drop cascades to table _timescaledb_internal._hyper_19_23_chunk +-- Variable buckets (timezone is provided) with origin +CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2 + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00'::timestamp), max(value) + FROM temperature_wo_tz + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:457: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin_ts_wo_tz2" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz2'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width 
+------------------+-------------------------------+--------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_4_hours_origin_ts_wo_tz2 | public.time_bucket(interval,timestamp without time zone,timestamp without time zone) | @ 4 hours | Fri Dec 31 17:00:00 1999 PST | | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2; +psql:include/cagg_query_common.sql:459: NOTICE: drop cascades to table _timescaledb_internal._hyper_20_24_chunk +CREATE MATERIALIZED VIEW cagg_4_hours_offset_wo_tz + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) + FROM temperature_wo_tz + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:465: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset_wo_tz" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_wo_tz'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+---------------------------+-------------------------------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours_offset_wo_tz | public.time_bucket(interval,timestamp without time zone,interval) | @ 4 hours | | @ 30 mins | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_offset_wo_tz; +psql:include/cagg_query_common.sql:467: NOTICE: drop cascades to table _timescaledb_internal._hyper_21_25_chunk +DROP MATERIALIZED VIEW cagg_4_hours_wo_tz; +psql:include/cagg_query_common.sql:468: NOTICE: drop cascades to table _timescaledb_internal._hyper_18_22_chunk +-- Date based CAggs +CREATE MATERIALIZED VIEW cagg_4_hours_date + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 days', time), max(value) + FROM temperature_date + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:475: NOTICE: refreshing continuous aggregate "cagg_4_hours_date" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+-------------------+----------------------------------------------+--------------+---------------+---------------+-----------------+-------------------- + public | cagg_4_hours_date | public.time_bucket(interval,pg_catalog.date) | @ 4 days | | | | t +(1 row) + +DROP MATERIALIZED VIEW cagg_4_hours_date; +psql:include/cagg_query_common.sql:477: NOTICE: drop cascades to table _timescaledb_internal._hyper_22_26_chunk +CREATE MATERIALIZED VIEW cagg_4_hours_date_origin + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 days', time, '2000-01-01'::date), max(value) + FROM temperature_date + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:483: NOTICE: refreshing continuous aggregate "cagg_4_hours_date_origin" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width 
+------------------+--------------------------+--------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+--------------------
+ public | cagg_4_hours_date_origin | public.time_bucket(interval,pg_catalog.date,pg_catalog.date) | @ 4 days | Fri Dec 31 16:00:00 1999 PST | | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_4_hours_date_origin;
+psql:include/cagg_query_common.sql:485: NOTICE: drop cascades to table _timescaledb_internal._hyper_23_27_chunk
+CREATE MATERIALIZED VIEW cagg_4_hours_date_origin2
+ WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+ SELECT time_bucket('4 days', time, origin=>'2000-01-01'::date), max(value)
+ FROM temperature_date
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:491: NOTICE: refreshing continuous aggregate "cagg_4_hours_date_origin2"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin2';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+---------------------------+--------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+--------------------
+ public | cagg_4_hours_date_origin2 | public.time_bucket(interval,pg_catalog.date,pg_catalog.date) | @ 4 days | Fri Dec 31 16:00:00 1999 PST | | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_4_hours_date_origin2;
+psql:include/cagg_query_common.sql:493: NOTICE: drop cascades to table _timescaledb_internal._hyper_24_28_chunk
+CREATE MATERIALIZED VIEW cagg_4_hours_date_offset
+ WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+ SELECT time_bucket('4 days', time, "offset"=>'30m'::interval), max(value)
+ FROM temperature_date
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:499: NOTICE: refreshing continuous aggregate "cagg_4_hours_date_offset"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_offset';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+--------------------------+-------------------------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_4_hours_date_offset | public.time_bucket(interval,pg_catalog.date,interval) | @ 4 days | | @ 30 mins | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_4_hours_date_offset;
+psql:include/cagg_query_common.sql:501: NOTICE: drop cascades to table _timescaledb_internal._hyper_25_29_chunk
+-- Integer based CAggs
+CREATE MATERIALIZED VIEW cagg_smallint
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time), SUM(data) as value
+ FROM table_smallint
+ GROUP BY 1;
+psql:include/cagg_query_common.sql:508: NOTICE: refreshing continuous aggregate "cagg_smallint"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+----------------+---------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_smallint | public.time_bucket(smallint,smallint) | 2 | | | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_smallint;
+psql:include/cagg_query_common.sql:510: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_30_chunk
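+-- For these integer variants, time_bucket() is plain integer arithmetic: without an
+-- offset the bucket start is width * floor(time / width), and with one it is
+-- "offset" + width * floor((time - "offset") / width). A quick sanity check
+-- (a sketch, not part of the recorded run):
+--
+--   SELECT time_bucket(2::smallint, 1::smallint);                          -- 0
+--   SELECT time_bucket(2::smallint, 1::smallint, "offset" => 1::smallint); -- 1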
+CREATE MATERIALIZED VIEW cagg_smallint_offset
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time, "offset"=>1::smallint), SUM(data) as value
+ FROM table_smallint
+ GROUP BY 1;
+psql:include/cagg_query_common.sql:516: NOTICE: refreshing continuous aggregate "cagg_smallint_offset"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint_offset';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+----------------------+------------------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_smallint_offset | public.time_bucket(smallint,smallint,smallint) | 2 | | 1 | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_smallint_offset;
+psql:include/cagg_query_common.sql:518: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_31_chunk
+CREATE MATERIALIZED VIEW cagg_int
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time), SUM(data) as value
+ FROM table_int
+ GROUP BY 1;
+psql:include/cagg_query_common.sql:524: NOTICE: refreshing continuous aggregate "cagg_int"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+----------------+-------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_int | public.time_bucket(integer,integer) | 2 | | | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_int;
+psql:include/cagg_query_common.sql:526: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_32_chunk
+CREATE MATERIALIZED VIEW cagg_int_offset
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time, "offset"=>1::int), SUM(data) as value
+ FROM table_int
+ GROUP BY 1;
+psql:include/cagg_query_common.sql:532: NOTICE: refreshing continuous aggregate "cagg_int_offset"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int_offset';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+-----------------+---------------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_int_offset | public.time_bucket(integer,integer,integer) | 2 | | 1 | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_int_offset;
+psql:include/cagg_query_common.sql:534: NOTICE: drop cascades to table _timescaledb_internal._hyper_29_33_chunk
+CREATE MATERIALIZED VIEW cagg_bigint
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time), SUM(data) as value
+ FROM table_bigint
+ GROUP BY 1 WITH NO DATA;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+----------------+-----------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_bigint | public.time_bucket(bigint,bigint) | 2 | | | | t
+(1 row)
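+-- Unlike the CAggs above, cagg_bigint is created WITH NO DATA: no "refreshing
+-- continuous aggregate" NOTICE is emitted and nothing is materialized at creation
+-- time. To populate it later, one would refresh explicitly, e.g.:
+--
+--   CALL refresh_continuous_aggregate('cagg_bigint', NULL, NULL);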
+
+DROP MATERIALIZED VIEW cagg_bigint;
+CREATE MATERIALIZED VIEW cagg_bigint_offset
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time, "offset"=>1::bigint), SUM(data) as value
+ FROM table_bigint
+ GROUP BY 1 WITH NO DATA;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+--------------------+------------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_bigint_offset | public.time_bucket(bigint,bigint,bigint) | 2 | | 1 | | t
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_bigint_offset;
+-- Without named parameter
+CREATE MATERIALIZED VIEW cagg_bigint_offset2
+ WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+ AS SELECT time_bucket('2', time, 1::bigint), SUM(data) as value
+ FROM table_bigint
+ GROUP BY 1 WITH NO DATA;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset2';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+---------------------+------------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_bigint_offset2 | public.time_bucket(bigint,bigint,bigint) | 2 | | 1 | | t
+(1 row)
+
+-- Mess with the bucket_func signature to make sure it raises an exception
+SET ROLE :ROLE_CLUSTER_SUPERUSER;
+\set ON_ERROR_STOP 0
+BEGIN;
+UPDATE _timescaledb_catalog.continuous_aggs_bucket_function SET bucket_func = 'func_does_not_exist()';
+-- should error because the function does not exist
+CALL refresh_continuous_aggregate('cagg_bigint_offset2', NULL, NULL);
+psql:include/cagg_query_common.sql:566: ERROR: function "func_does_not_exist()" does not exist
+ROLLBACK;
+\set ON_ERROR_STOP 1
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+DROP MATERIALIZED VIEW cagg_bigint_offset2;
+-- Test invalid bucket definitions
+\set ON_ERROR_STOP 0
+-- Offset and origin at the same time are not allowed (the function does not exist)
+CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin
+ WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+ SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value)
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:580: ERROR: function time_bucket(unknown, timestamp with time zone, offset => interval, origin => timestamp with time zone) does not exist at character 140
+-- Offset and origin at the same time are not allowed (the function exists, but the parameter combination is invalid)
+CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin
+ WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+ SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value)
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:587: ERROR: using offset and origin in a time_bucket function at the same time is not supported
+\set ON_ERROR_STOP 1
+---
+-- Tests with CAgg processing
+---
+-- Check used timezone
+SHOW timezone;
+ TimeZone
+----------
+ PST8PDT
+(1 row)
+
+-- Populate it
+INSERT INTO temperature
+ SELECT time, 5
+ FROM generate_series('2000-01-01 01:00:00
PST'::timestamptz, + '2000-01-01 23:59:59 PST','1m') time; +INSERT INTO temperature + SELECT time, 6 + FROM generate_series('2020-01-01 00:00:00 PST'::timestamptz, + '2020-01-01 23:59:59 PST','1m') time; +-- Create CAggs +CREATE MATERIALIZED VIEW cagg_4_hours + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:613: NOTICE: refreshing continuous aggregate "cagg_4_hours" +CREATE MATERIALIZED VIEW cagg_4_hours_offset + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '30m'::interval), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:619: NOTICE: refreshing continuous aggregate "cagg_4_hours_offset" +-- Align origin with first value +CREATE MATERIALIZED VIEW cagg_4_hours_origin + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:626: NOTICE: refreshing continuous aggregate "cagg_4_hours_origin" +-- Query the CAggs and check that all buckets are materialized +SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+----- + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 6 + Wed Jan 01 20:00:00 2020 PST | 6 +(12 rows) + +SELECT * FROM cagg_4_hours; + time_bucket | max +------------------------------+----- + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 6 + Wed Jan 01 20:00:00 2020 PST | 6 +(12 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours; + time_bucket | max +------------------------------+----- + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 6 + Wed Jan 01 20:00:00 2020 PST | 6 +(12 rows) + +SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+----- + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 
6 + Wed Jan 01 16:30:00 2020 PST | 6 + Wed Jan 01 20:30:00 2020 PST | 6 +(13 rows) + +SELECT * FROM cagg_4_hours_offset; + time_bucket | max +------------------------------+----- + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 6 + Wed Jan 01 16:30:00 2020 PST | 6 + Wed Jan 01 20:30:00 2020 PST | 6 +(13 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours_offset; + time_bucket | max +------------------------------+----- + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 6 + Wed Jan 01 16:30:00 2020 PST | 6 + Wed Jan 01 20:30:00 2020 PST | 6 +(13 rows) + +SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+----- + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 6 + Wed Jan 01 17:00:00 2020 PST | 6 + Wed Jan 01 21:00:00 2020 PST | 6 +(13 rows) + +SELECT * FROM cagg_4_hours_origin; + time_bucket | max +------------------------------+----- + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 6 + Wed Jan 01 17:00:00 2020 PST | 6 + Wed Jan 01 21:00:00 2020 PST | 6 +(13 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours_origin; + time_bucket | max +------------------------------+----- + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 6 + Wed Jan 01 17:00:00 2020 PST | 6 + Wed Jan 01 21:00:00 2020 PST | 6 +(13 rows) + +-- Update the last bucket and re-materialize +INSERT INTO temperature values('2020-01-01 23:55:00 PST', 10); +CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); +CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); +CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); +SELECT * FROM cagg_4_hours; + time_bucket | max 
+------------------------------+-----
+ Sat Jan 01 00:00:00 2000 PST | 5
+ Sat Jan 01 04:00:00 2000 PST | 5
+ Sat Jan 01 08:00:00 2000 PST | 5
+ Sat Jan 01 12:00:00 2000 PST | 5
+ Sat Jan 01 16:00:00 2000 PST | 5
+ Sat Jan 01 20:00:00 2000 PST | 5
+ Wed Jan 01 00:00:00 2020 PST | 6
+ Wed Jan 01 04:00:00 2020 PST | 6
+ Wed Jan 01 08:00:00 2020 PST | 6
+ Wed Jan 01 12:00:00 2020 PST | 6
+ Wed Jan 01 16:00:00 2020 PST | 6
+ Wed Jan 01 20:00:00 2020 PST | 10
+(12 rows)
+
+SELECT * FROM cagg_4_hours_offset;
+ time_bucket | max
+------------------------------+-----
+ Sat Jan 01 00:30:00 2000 PST | 5
+ Sat Jan 01 04:30:00 2000 PST | 5
+ Sat Jan 01 08:30:00 2000 PST | 5
+ Sat Jan 01 12:30:00 2000 PST | 5
+ Sat Jan 01 16:30:00 2000 PST | 5
+ Sat Jan 01 20:30:00 2000 PST | 5
+ Tue Dec 31 20:30:00 2019 PST | 6
+ Wed Jan 01 00:30:00 2020 PST | 6
+ Wed Jan 01 04:30:00 2020 PST | 6
+ Wed Jan 01 08:30:00 2020 PST | 6
+ Wed Jan 01 12:30:00 2020 PST | 6
+ Wed Jan 01 16:30:00 2020 PST | 6
+ Wed Jan 01 20:30:00 2020 PST | 10
+(13 rows)
+
+SELECT * FROM cagg_4_hours_origin;
+ time_bucket | max
+------------------------------+-----
+ Sat Jan 01 01:00:00 2000 PST | 5
+ Sat Jan 01 05:00:00 2000 PST | 5
+ Sat Jan 01 09:00:00 2000 PST | 5
+ Sat Jan 01 13:00:00 2000 PST | 5
+ Sat Jan 01 17:00:00 2000 PST | 5
+ Sat Jan 01 21:00:00 2000 PST | 5
+ Tue Dec 31 21:00:00 2019 PST | 6
+ Wed Jan 01 01:00:00 2020 PST | 6
+ Wed Jan 01 05:00:00 2020 PST | 6
+ Wed Jan 01 09:00:00 2020 PST | 6
+ Wed Jan 01 13:00:00 2020 PST | 6
+ Wed Jan 01 17:00:00 2020 PST | 6
+ Wed Jan 01 21:00:00 2020 PST | 10
+(13 rows)
+
+-- Check the real-time functionality
+ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=false);
+-- Check watermarks
+SELECT continuous_agg.user_view_name, continuous_aggs_watermark.watermark, _timescaledb_functions.to_timestamp(watermark)
+ FROM _timescaledb_catalog.continuous_aggs_watermark
+ JOIN _timescaledb_catalog.continuous_agg USING (mat_hypertable_id)
+WHERE user_view_name LIKE 'cagg_4_hours%'
+ORDER BY mat_hypertable_id, watermark;
+ user_view_name | watermark | to_timestamp
+---------------------+------------------+------------------------------
+ cagg_4_hours | 1577952000000000 | Thu Jan 02 00:00:00 2020 PST
+ cagg_4_hours_offset | 1577953800000000 | Thu Jan 02 00:30:00 2020 PST
+ cagg_4_hours_origin | 1577955600000000 | Thu Jan 02 01:00:00 2020 PST
+(3 rows)
+
+-- Insert new data
+INSERT INTO temperature values('2020-01-02 00:10:00 PST', 2222);
+INSERT INTO temperature values('2020-01-02 05:35:00 PST', 5555);
+INSERT INTO temperature values('2020-01-02 09:05:00 PST', 8888);
+-- Watermark is at Thu Jan 02 00:00:00 2020 PST - all inserted tuples should be seen
+SELECT * FROM cagg_4_hours;
+ time_bucket | max
+------------------------------+------
+ Sat Jan 01 00:00:00 2000 PST | 5
+ Sat Jan 01 04:00:00 2000 PST | 5
+ Sat Jan 01 08:00:00 2000 PST | 5
+ Sat Jan 01 12:00:00 2000 PST | 5
+ Sat Jan 01 16:00:00 2000 PST | 5
+ Sat Jan 01 20:00:00 2000 PST | 5
+ Wed Jan 01 00:00:00 2020 PST | 6
+ Wed Jan 01 04:00:00 2020 PST | 6
+ Wed Jan 01 08:00:00 2020 PST | 6
+ Wed Jan 01 12:00:00 2020 PST | 6
+ Wed Jan 01 16:00:00 2020 PST | 6
+ Wed Jan 01 20:00:00 2020 PST | 10
+ Thu Jan 02 00:00:00 2020 PST | 2222
+ Thu Jan 02 04:00:00 2020 PST | 5555
+ Thu Jan 02 08:00:00 2020 PST | 8888
+(15 rows)
+
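+-- The watermark drives the real-time reads checked here: buckets strictly below it are
+-- served from the materialization table, data at or above it is aggregated on the fly
+-- from the raw hypertable. For cagg_4_hours this is roughly (a sketch of the idea, not
+-- the view definition TimescaleDB actually generates; <watermark> stands for the
+-- timestamp shown above):
+--
+--   SELECT time_bucket, max
+--   FROM _timescaledb_internal._materialized_hypertable_33
+--   WHERE time_bucket < <watermark>
+--   UNION ALL
+--   SELECT time_bucket('4 hour', time) AS time_bucket, max(value) AS max
+--   FROM temperature
+--   WHERE time >= <watermark>
+--   GROUP BY 1;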
+-- Watermark is at Thu Jan 02 00:30:00 2020 PST - only two inserted tuples should be seen
+SELECT * FROM cagg_4_hours_offset;
+ time_bucket | max
+------------------------------+------
+ Sat Jan 01 00:30:00 2000 PST | 5
+ Sat Jan 01 04:30:00 2000 PST | 5
+ Sat Jan 01 08:30:00 2000 PST | 5
+ Sat Jan 01 12:30:00 2000 PST | 5
+ Sat Jan 01 16:30:00 2000 PST | 5
+ Sat Jan 01 20:30:00 2000 PST | 5
+ Tue Dec 31 20:30:00 2019 PST | 6
+ Wed Jan 01 00:30:00 2020 PST | 6
+ Wed Jan 01 04:30:00 2020 PST | 6
+ Wed Jan 01 08:30:00 2020 PST | 6
+ Wed Jan 01 12:30:00 2020 PST | 6
+ Wed Jan 01 16:30:00 2020 PST | 6
+ Wed Jan 01 20:30:00 2020 PST | 10
+ Thu Jan 02 04:30:00 2020 PST | 5555
+ Thu Jan 02 08:30:00 2020 PST | 8888
+(15 rows)
+
+-- Watermark is at Thu Jan 02 01:00:00 2020 PST - only two inserted tuples should be seen
+SELECT * FROM cagg_4_hours_origin;
+ time_bucket | max
+------------------------------+------
+ Sat Jan 01 01:00:00 2000 PST | 5
+ Sat Jan 01 05:00:00 2000 PST | 5
+ Sat Jan 01 09:00:00 2000 PST | 5
+ Sat Jan 01 13:00:00 2000 PST | 5
+ Sat Jan 01 17:00:00 2000 PST | 5
+ Sat Jan 01 21:00:00 2000 PST | 5
+ Tue Dec 31 21:00:00 2019 PST | 6
+ Wed Jan 01 01:00:00 2020 PST | 6
+ Wed Jan 01 05:00:00 2020 PST | 6
+ Wed Jan 01 09:00:00 2020 PST | 6
+ Wed Jan 01 13:00:00 2020 PST | 6
+ Wed Jan 01 17:00:00 2020 PST | 6
+ Wed Jan 01 21:00:00 2020 PST | 10
+ Thu Jan 02 05:00:00 2020 PST | 5555
+ Thu Jan 02 09:00:00 2020 PST | 8888
+(15 rows)
+
+-- Update materialized data
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL);
+psql:include/cagg_query_common.sql:683: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL);
+psql:include/cagg_query_common.sql:683: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Thu Jan 02 00:00:00 2020 PST, Thu Jan 02 12:00:00 2020 PST ]
+psql:include/cagg_query_common.sql:683: LOG: merged 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33"
+psql:include/cagg_query_common.sql:683: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33"
+CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL);
+psql:include/cagg_query_common.sql:684: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL);
+psql:include/cagg_query_common.sql:684: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577995200000000
+psql:include/cagg_query_common.sql:684: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Wed Jan 01 20:30:00 2020 PST, Thu Jan 02 12:30:00 2020 PST ]
+psql:include/cagg_query_common.sql:684: LOG: merged 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34"
+psql:include/cagg_query_common.sql:684: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34"
+CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL);
+psql:include/cagg_query_common.sql:685: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL);
+psql:include/cagg_query_common.sql:685: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577995200000000
+psql:include/cagg_query_common.sql:685: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Wed Jan 01 21:00:00 2020 PST, Thu Jan 02 13:00:00 2020 PST ]
+psql:include/cagg_query_common.sql:685: LOG: merged 3 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:685: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" +RESET client_min_messages; +psql:include/cagg_query_common.sql:686: LOG: statement: RESET client_min_messages; +-- Query the CAggs and check that all buckets are materialized +SELECT * FROM cagg_4_hours; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 6 + Wed Jan 01 20:00:00 2020 PST | 10 + Thu Jan 02 00:00:00 2020 PST | 2222 + Thu Jan 02 04:00:00 2020 PST | 5555 + Thu Jan 02 08:00:00 2020 PST | 8888 +(15 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 6 + Wed Jan 01 20:00:00 2020 PST | 10 + Thu Jan 02 00:00:00 2020 PST | 2222 + Thu Jan 02 04:00:00 2020 PST | 5555 + Thu Jan 02 08:00:00 2020 PST | 8888 +(15 rows) + +SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 6 + Wed Jan 01 20:00:00 2020 PST | 10 + Thu Jan 02 00:00:00 2020 PST | 2222 + Thu Jan 02 04:00:00 2020 PST | 5555 + Thu Jan 02 08:00:00 2020 PST | 8888 +(15 rows) + +SELECT * FROM cagg_4_hours_offset; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 6 + Wed Jan 01 16:30:00 2020 PST | 6 + Wed Jan 01 20:30:00 2020 PST | 2222 + Thu Jan 02 04:30:00 2020 PST | 5555 + Thu Jan 02 08:30:00 2020 PST | 8888 +(15 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours_offset; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 
20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 6 + Wed Jan 01 16:30:00 2020 PST | 6 + Wed Jan 01 20:30:00 2020 PST | 2222 + Thu Jan 02 04:30:00 2020 PST | 5555 + Thu Jan 02 08:30:00 2020 PST | 8888 +(15 rows) + +SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 6 + Wed Jan 01 16:30:00 2020 PST | 6 + Wed Jan 01 20:30:00 2020 PST | 2222 + Thu Jan 02 04:30:00 2020 PST | 5555 + Thu Jan 02 08:30:00 2020 PST | 8888 +(15 rows) + +SELECT * FROM cagg_4_hours_origin; + time_bucket | max +------------------------------+------ + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 6 + Wed Jan 01 17:00:00 2020 PST | 6 + Wed Jan 01 21:00:00 2020 PST | 2222 + Thu Jan 02 05:00:00 2020 PST | 5555 + Thu Jan 02 09:00:00 2020 PST | 8888 +(15 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours_origin; + time_bucket | max +------------------------------+------ + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 6 + Wed Jan 01 17:00:00 2020 PST | 6 + Wed Jan 01 21:00:00 2020 PST | 2222 + Thu Jan 02 05:00:00 2020 PST | 5555 + Thu Jan 02 09:00:00 2020 PST | 8888 +(15 rows) + +SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+------ + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 6 + Wed Jan 01 17:00:00 2020 PST | 6 + Wed Jan 01 21:00:00 2020 PST | 2222 + Thu Jan 02 05:00:00 2020 PST | 5555 + Thu Jan 02 09:00:00 2020 PST | 8888 +(15 rows) + +-- Test invalidations +TRUNCATE temperature; +CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); +CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); +CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); +INSERT INTO temperature + SELECT time, 5 + FROM generate_series('2000-01-01 01:00:00 PST'::timestamptz, + '2000-01-01 23:59:59 PST','1m') time; +INSERT INTO 
temperature + SELECT time, 6 + FROM generate_series('2020-01-01 00:00:00 PST'::timestamptz, + '2020-01-01 23:59:59 PST','1m') time; +INSERT INTO temperature values('2020-01-02 01:05:00+01', 2222); +INSERT INTO temperature values('2020-01-02 01:35:00+01', 5555); +INSERT INTO temperature values('2020-01-02 05:05:00+01', 8888); +SET client_min_messages TO DEBUG1; +CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); +psql:include/cagg_query_common.sql:725: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); +psql:include/cagg_query_common.sql:725: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:725: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Sat Jan 01 00:00:00 2000 PST, Sun Jan 02 00:00:00 2000 PST ] +psql:include/cagg_query_common.sql:725: LOG: merged 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: DEBUG: hypertable 33 existing watermark >= new watermark 1577995200000000 946800000000000 +psql:include/cagg_query_common.sql:725: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours" in window [ Wed Jan 01 00:00:00 2020 PST, Thu Jan 02 00:00:00 2020 PST ] +psql:include/cagg_query_common.sql:725: LOG: merged 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_33" +psql:include/cagg_query_common.sql:725: DEBUG: hypertable 33 existing watermark >= new watermark 1577995200000000 1577952000000000 +CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); +psql:include/cagg_query_common.sql:726: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); +psql:include/cagg_query_common.sql:726: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:726: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Fri Dec 31 20:30:00 1999 PST, Sun Jan 02 00:30:00 2000 PST ] +psql:include/cagg_query_common.sql:726: LOG: merged 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: DEBUG: hypertable 34 existing watermark >= new watermark 1577997000000000 946801800000000 +psql:include/cagg_query_common.sql:726: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_offset" in window [ Tue Dec 31 20:30:00 2019 PST, Thu Jan 02 00:30:00 2020 PST ] +psql:include/cagg_query_common.sql:726: LOG: merged 7 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_34" +psql:include/cagg_query_common.sql:726: DEBUG: hypertable 34 existing watermark >= new watermark 1577997000000000 1577953800000000 +CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); 
+psql:include/cagg_query_common.sql:727: LOG: statement: CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); +psql:include/cagg_query_common.sql:727: DEBUG: hypertable 4 existing watermark >= new invalidation threshold 1577995200000000 1577952000000000 +psql:include/cagg_query_common.sql:727: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Fri Dec 31 21:00:00 1999 PST, Sun Jan 02 01:00:00 2000 PST ] +psql:include/cagg_query_common.sql:727: LOG: merged 6 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: DEBUG: hypertable 35 existing watermark >= new watermark 1577998800000000 946803600000000 +psql:include/cagg_query_common.sql:727: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_4_hours_origin" in window [ Tue Dec 31 21:00:00 2019 PST, Thu Jan 02 01:00:00 2020 PST ] +psql:include/cagg_query_common.sql:727: LOG: merged 7 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_35" +psql:include/cagg_query_common.sql:727: DEBUG: hypertable 35 existing watermark >= new watermark 1577998800000000 1577955600000000 +RESET client_min_messages; +psql:include/cagg_query_common.sql:728: LOG: statement: RESET client_min_messages; +ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 5555 + Wed Jan 01 20:00:00 2020 PST | 8888 +(12 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=false); +SELECT * FROM cagg_4_hours; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 5555 + Wed Jan 01 20:00:00 2020 PST | 8888 +(12 rows) + +SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:00:00 2000 PST | 5 + Sat Jan 01 04:00:00 2000 PST | 5 + Sat Jan 01 08:00:00 2000 PST | 5 + Sat Jan 01 12:00:00 2000 PST | 5 + Sat Jan 01 16:00:00 2000 PST | 5 + Sat Jan 01 20:00:00 2000 PST | 5 + Wed Jan 01 00:00:00 2020 PST | 6 + Wed Jan 01 04:00:00 2020 PST | 6 + Wed Jan 01 08:00:00 2020 PST | 6 + Wed Jan 01 12:00:00 2020 PST | 6 + Wed Jan 01 16:00:00 2020 PST | 5555 + Wed Jan 01 20:00:00 2020 PST | 8888 +(12 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours_offset; + time_bucket | 
max +------------------------------+------ + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 2222 + Wed Jan 01 16:30:00 2020 PST | 8888 + Wed Jan 01 20:30:00 2020 PST | 6 +(13 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=false); +SELECT * FROM cagg_4_hours_offset; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 2222 + Wed Jan 01 16:30:00 2020 PST | 8888 + Wed Jan 01 20:30:00 2020 PST | 6 +(13 rows) + +SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+------ + Sat Jan 01 00:30:00 2000 PST | 5 + Sat Jan 01 04:30:00 2000 PST | 5 + Sat Jan 01 08:30:00 2000 PST | 5 + Sat Jan 01 12:30:00 2000 PST | 5 + Sat Jan 01 16:30:00 2000 PST | 5 + Sat Jan 01 20:30:00 2000 PST | 5 + Tue Dec 31 20:30:00 2019 PST | 6 + Wed Jan 01 00:30:00 2020 PST | 6 + Wed Jan 01 04:30:00 2020 PST | 6 + Wed Jan 01 08:30:00 2020 PST | 6 + Wed Jan 01 12:30:00 2020 PST | 2222 + Wed Jan 01 16:30:00 2020 PST | 8888 + Wed Jan 01 20:30:00 2020 PST | 6 +(13 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true); +SELECT * FROM cagg_4_hours_origin; + time_bucket | max +------------------------------+------ + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 5555 + Wed Jan 01 17:00:00 2020 PST | 8888 + Wed Jan 01 21:00:00 2020 PST | 6 +(13 rows) + +ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=false); +SELECT * FROM cagg_4_hours_origin; + time_bucket | max +------------------------------+------ + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 6 + Wed Jan 01 01:00:00 2020 PST | 6 + Wed Jan 01 05:00:00 2020 PST | 6 + Wed Jan 01 09:00:00 2020 PST | 6 + Wed Jan 01 13:00:00 2020 PST | 5555 + Wed Jan 01 17:00:00 2020 PST | 8888 + Wed Jan 01 21:00:00 2020 PST | 6 +(13 rows) + +SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; + time_bucket | max +------------------------------+------ + Sat Jan 01 01:00:00 2000 PST | 5 + Sat Jan 01 05:00:00 2000 PST | 5 + Sat Jan 01 09:00:00 2000 PST | 5 + Sat Jan 01 13:00:00 2000 PST | 5 + Sat Jan 01 17:00:00 2000 PST | 5 + Sat Jan 01 21:00:00 2000 PST | 5 + Tue Dec 31 21:00:00 2019 PST | 
6
+ Wed Jan 01 01:00:00 2020 PST | 6
+ Wed Jan 01 05:00:00 2020 PST | 6
+ Wed Jan 01 09:00:00 2020 PST | 6
+ Wed Jan 01 13:00:00 2020 PST | 5555
+ Wed Jan 01 17:00:00 2020 PST | 8888
+ Wed Jan 01 21:00:00 2020 PST | 6
+(13 rows)
+
+--- Test with variable-width buckets (use February, since hourly origins are not supported with variable-sized buckets)
+TRUNCATE temperature;
+INSERT INTO temperature
+ SELECT time, 5
+ FROM generate_series('2000-02-01 01:00:00 PST'::timestamptz,
+ '2000-02-01 23:59:59 PST','1m') time;
+INSERT INTO temperature
+ SELECT time, 6
+ FROM generate_series('2020-02-01 01:00:00 PST'::timestamptz,
+ '2020-02-01 23:59:59 PST','1m') time;
+SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3;
+ materialization_id | lowest_modified_value | greatest_modified_value
+--------------------+-----------------------+-------------------------
+ 2 | -9223372036854775808 | -210866803200000001
+ 2 | 1541289600000000 | 9223372036854775807
+ 3 | -9223372036854775808 | -210866803200000001
+ 3 | 1541289600000000 | 9223372036854775807
+ 33 | -9223372036854775808 | -210866803200000001
+ 33 | 1577995200000000 | 9223372036854775807
+ 34 | -9223372036854775808 | -210866803200000001
+ 34 | 1577995200000000 | 9223372036854775807
+ 35 | -9223372036854775808 | -210866803200000001
+ 35 | 1577995200000000 | 9223372036854775807
+(10 rows)
+
+CREATE MATERIALIZED VIEW cagg_1_year
+ WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+ SELECT time_bucket('1 year', time), max(value)
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:766: NOTICE: refreshing continuous aggregate "cagg_1_year"
+SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3;
+ materialization_id | lowest_modified_value | greatest_modified_value
+--------------------+-----------------------+-------------------------
+ 2 | -9223372036854775808 | -210866803200000001
+ 2 | 1541289600000000 | 9223372036854775807
+ 3 | -9223372036854775808 | -210866803200000001
+ 3 | 1541289600000000 | 9223372036854775807
+ 33 | -9223372036854775808 | -210866803200000001
+ 33 | -9223372036854775808 | 9223372036854775807
+ 33 | 1577995200000000 | 9223372036854775807
+ 34 | -9223372036854775808 | -210866803200000001
+ 34 | -9223372036854775808 | 9223372036854775807
+ 34 | 1577995200000000 | 9223372036854775807
+ 35 | -9223372036854775808 | -210866803200000001
+ 35 | -9223372036854775808 | 9223372036854775807
+ 35 | 1577995200000000 | 9223372036854775807
+ 36 | 1609459200000000 | 9223372036854775807
+(14 rows)
+
+---
+-- Tests with integer based hypertables
+---
+TRUNCATE table_int;
+INSERT INTO table_int
+ SELECT time, 5
+ FROM generate_series(-50, 50) time;
+CREATE MATERIALIZED VIEW cagg_int
+ WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+ AS SELECT time_bucket('10', time), SUM(data) as value
+ FROM table_int
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:783: NOTICE: refreshing continuous aggregate "cagg_int"
+CREATE MATERIALIZED VIEW cagg_int_offset
+ WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+ AS SELECT time_bucket('10', time, "offset"=>5), SUM(data) as value
+ FROM table_int
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:789: NOTICE: refreshing continuous aggregate "cagg_int_offset"
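+-- With "offset"=>5 the bucket start is 5 + 10 * floor((time - 5) / 10), so the series
+-- -50..50 is split into buckets -55, -45, ..., 45: e.g. time -50 falls into
+-- 5 + 10 * floor(-55 / 10) = 5 + 10 * (-6) = -55, and time 50 into
+-- 5 + 10 * floor(45 / 10) = 45, matching the comparison below.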
+ -40 | 50
+ -30 | 50
+ -20 | 50
+ -10 | 50
+ 0 | 50
+ 10 | 50
+ 20 | 50
+ 30 | 50
+ 40 | 50
+ 50 | 5
+(11 rows)
+
+SELECT * FROM cagg_int;
+ time_bucket | value
+-------------+-------
+ -50 | 50
+ -40 | 50
+ -30 | 50
+ -20 | 50
+ -10 | 50
+ 0 | 50
+ 10 | 50
+ 20 | 50
+ 30 | 50
+ 40 | 50
+ 50 | 5
+(11 rows)
+
+SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+ time_bucket | sum
+-------------+-----
+ -55 | 25
+ -45 | 50
+ -35 | 50
+ -25 | 50
+ -15 | 50
+ -5 | 50
+ 5 | 50
+ 15 | 50
+ 25 | 50
+ 35 | 50
+ 45 | 30
+(11 rows)
+
+SELECT * FROM cagg_int_offset;
+ time_bucket | value
+-------------+-------
+ -55 | 25
+ -45 | 50
+ -35 | 50
+ -25 | 50
+ -15 | 50
+ -5 | 50
+ 5 | 50
+ 15 | 50
+ 25 | 50
+ 35 | 50
+ 45 | 30
+(11 rows)
+
+-- Update table
+INSERT INTO table_int VALUES(51, 100);
+INSERT INTO table_int VALUES(100, 555);
+-- Compare bucketing results
+SELECT time_bucket('10', time), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+ time_bucket | sum
+-------------+-----
+ -50 | 50
+ -40 | 50
+ -30 | 50
+ -20 | 50
+ -10 | 50
+ 0 | 50
+ 10 | 50
+ 20 | 50
+ 30 | 50
+ 40 | 50
+ 50 | 105
+ 100 | 555
+(12 rows)
+
+SELECT * FROM cagg_int;
+ time_bucket | value
+-------------+-------
+ -50 | 50
+ -40 | 50
+ -30 | 50
+ -20 | 50
+ -10 | 50
+ 0 | 50
+ 10 | 50
+ 20 | 50
+ 30 | 50
+ 40 | 50
+ 50 | 5
+ 100 | 555
+(12 rows)
+
+CALL refresh_continuous_aggregate('cagg_int', NULL, NULL);
+SELECT * FROM cagg_int;
+ time_bucket | value
+-------------+-------
+ -50 | 50
+ -40 | 50
+ -30 | 50
+ -20 | 50
+ -10 | 50
+ 0 | 50
+ 10 | 50
+ 20 | 50
+ 30 | 50
+ 40 | 50
+ 50 | 105
+ 100 | 555
+(12 rows)
+
+SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+ time_bucket | sum
+-------------+-----
+ -55 | 25
+ -45 | 50
+ -35 | 50
+ -25 | 50
+ -15 | 50
+ -5 | 50
+ 5 | 50
+ 15 | 50
+ 25 | 50
+ 35 | 50
+ 45 | 130
+ 95 | 555
+(12 rows)
+
+SELECT * FROM cagg_int_offset; -- the value 100 is part of the already serialized bucket, so it should not be visible
+ time_bucket | value
+-------------+-------
+ -55 | 25
+ -45 | 50
+ -35 | 50
+ -25 | 50
+ -15 | 50
+ -5 | 50
+ 5 | 50
+ 15 | 50
+ 25 | 50
+ 35 | 50
+ 45 | 30
+ 95 | 555
+(12 rows)
+
+CALL refresh_continuous_aggregate('cagg_int_offset', NULL, NULL);
+SELECT * FROM cagg_int_offset;
+ time_bucket | value
+-------------+-------
+ -55 | 25
+ -45 | 50
+ -35 | 50
+ -25 | 50
+ -15 | 50
+ -5 | 50
+ 5 | 50
+ 15 | 50
+ 25 | 50
+ 35 | 50
+ 45 | 130
+ 95 | 555
+(12 rows)
+
+-- Ensure everything was materialized
+ALTER MATERIALIZED VIEW cagg_int SET (timescaledb.materialized_only=true);
+ALTER MATERIALIZED VIEW cagg_int_offset SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_int;
+ time_bucket | value
+-------------+-------
+ -50 | 50
+ -40 | 50
+ -30 | 50
+ -20 | 50
+ -10 | 50
+ 0 | 50
+ 10 | 50
+ 20 | 50
+ 30 | 50
+ 40 | 50
+ 50 | 105
+ 100 | 555
+(12 rows)
+
+SELECT * FROM cagg_int_offset;
+ time_bucket | value
+-------------+-------
+ -55 | 25
+ -45 | 50
+ -35 | 50
+ -25 | 50
+ -15 | 50
+ -5 | 50
+ 5 | 50
+ 15 | 50
+ 25 | 50
+ 35 | 50
+ 45 | 130
+ 95 | 555
+(12 rows)
+
+-- Check that the refresh is properly aligned
+INSERT INTO table_int VALUES(114, 0);
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130);
+psql:include/cagg_query_common.sql:824: LOG: statement: CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130);
+psql:include/cagg_query_common.sql:824: DEBUG: continuous aggregate refresh (individual invalidation) on "cagg_int_offset" in window [ 105, 135 ]
"cagg_int_offset" in window [ 105, 135 ] +psql:include/cagg_query_common.sql:824: DEBUG: building index "_hyper_38_67_chunk__materialized_hypertable_38_time_bucket_idx" on table "_hyper_38_67_chunk" serially +psql:include/cagg_query_common.sql:824: DEBUG: index "_hyper_38_67_chunk__materialized_hypertable_38_time_bucket_idx" can safely use deduplication +psql:include/cagg_query_common.sql:824: LOG: merged 1 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_38" +psql:include/cagg_query_common.sql:824: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_38" +RESET client_min_messages; +psql:include/cagg_query_common.sql:825: LOG: statement: RESET client_min_messages; +SELECT * FROM cagg_int_offset; + time_bucket | value +-------------+------- + -55 | 25 + -45 | 50 + -35 | 50 + -25 | 50 + -15 | 50 + -5 | 50 + 5 | 50 + 15 | 50 + 25 | 50 + 35 | 50 + 45 | 130 + 95 | 555 + 105 | 0 +(13 rows) + +SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1; + time_bucket | sum +-------------+----- + -55 | 25 + -45 | 50 + -35 | 50 + -25 | 50 + -15 | 50 + -5 | 50 + 5 | 50 + 15 | 50 + 25 | 50 + 35 | 50 + 45 | 130 + 95 | 555 + 105 | 0 +(13 rows) + +-- Variable sized buckets with origin +CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin + WITH (timescaledb.continuous) AS + SELECT time_bucket('1 year', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:835: NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+------------------------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | cagg_1_hour_variable_bucket_fixed_origin | public.time_bucket(interval,timestamp with time zone,pg_catalog.text,timestamp with time zone,interval) | @ 1 year | Fri Dec 31 17:05:00 1999 PST | | UTC | f +(1 row) + +DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin; +psql:include/cagg_query_common.sql:837: NOTICE: drop cascades to 2 other objects +-- Variable due to the used timezone +CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2 + WITH (timescaledb.continuous) AS + SELECT time_bucket('1 hour', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value + FROM temperature + GROUP BY 1 ORDER BY 1; +psql:include/cagg_query_common.sql:844: NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin2" +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin2'; + user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width +------------------+-------------------------------------------+---------------------------------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+-------------------- + public | 
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2;
+psql:include/cagg_query_common.sql:846: NOTICE: drop cascades to 2 other objects
+-- Variable with offset
+CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 year', time, "offset"=>'5 minutes'::interval) AS hour_bucket, max(value) AS max_value
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:853: NOTICE: refreshing continuous aggregate "cagg_1_hour_variable_bucket_fixed_origin3"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin3';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+-------------------------------------------+----------------------------------------------------------------+--------------+---------------+---------------+-----------------+--------------------
+ public | cagg_1_hour_variable_bucket_fixed_origin3 | public.time_bucket(interval,timestamp with time zone,interval) | @ 1 year | | @ 5 mins | | f
+(1 row)
+
+DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3;
+psql:include/cagg_query_common.sql:855: NOTICE: drop cascades to 2 other objects
+---
+-- Test with blocking a few broken configurations
+---
+\set ON_ERROR_STOP 0
+-- Unfortunately '\set VERBOSITY verbose' cannot be used here to check the error details
+-- since it also prints the line number of the location, which is dependent on the build
+-- Different time origin
+CREATE MATERIALIZED VIEW cagg_1_hour_origin
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:870: NOTICE: refreshing continuous aggregate "cagg_1_hour_origin"
+CREATE MATERIALIZED VIEW cagg_1_week_origin
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 week', hour_bucket, origin=>'2022-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value
+ FROM cagg_1_hour_origin
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:876: ERROR: cannot create continuous aggregate with different bucket origin values
+-- Different time offset
+CREATE MATERIALIZED VIEW cagg_1_hour_offset
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 hour', time, "offset"=>'30m'::interval) AS hour_bucket, max(value) AS max_value
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:883: NOTICE: refreshing continuous aggregate "cagg_1_hour_offset"
+CREATE MATERIALIZED VIEW cagg_1_week_offset
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 week', hour_bucket, "offset"=>'35m'::interval) AS week_bucket, max(max_value) AS max_value
+ FROM cagg_1_hour_offset
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:889: ERROR: cannot create continuous aggregate with different bucket offset values
+-- Different integer offset
+CREATE MATERIALIZED VIEW cagg_int_offset_5
+ WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+ AS SELECT time_bucket('10', time, "offset"=>5) AS time, SUM(data) AS value
+ FROM table_int
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:896: NOTICE: refreshing continuous aggregate "cagg_int_offset_5"
+CREATE MATERIALIZED VIEW cagg_int_offset_10
+ WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+ AS SELECT time_bucket('10', time, "offset"=>10) AS time, SUM(value) AS value
+ FROM cagg_int_offset_5
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:902: ERROR: cannot create continuous aggregate with different bucket offset values
+\set ON_ERROR_STOP 1
+DROP MATERIALIZED VIEW cagg_1_hour_origin;
+psql:include/cagg_query_common.sql:906: NOTICE: drop cascades to 2 other objects
+DROP MATERIALIZED VIEW cagg_1_hour_offset;
+psql:include/cagg_query_common.sql:907: NOTICE: drop cascades to 2 other objects
+DROP MATERIALIZED VIEW cagg_int_offset_5;
+psql:include/cagg_query_common.sql:908: NOTICE: drop cascades to 3 other objects
+---
+-- CAGGs on CAGGs tests
+---
+CREATE MATERIALIZED VIEW cagg_1_hour_offset
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value
+ FROM temperature
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:917: NOTICE: refreshing continuous aggregate "cagg_1_hour_offset"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_offset';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+--------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+--------------------
+ public | cagg_1_hour_offset | public.time_bucket(interval,timestamp with time zone,timestamp with time zone) | @ 1 hour | Sun Jan 02 01:00:00 2000 PST | | | t
+(1 row)
+
+CREATE MATERIALIZED VIEW cagg_1_week_offset
+ WITH (timescaledb.continuous) AS
+ SELECT time_bucket('1 week', hour_bucket, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value
+ FROM cagg_1_hour_offset
+ GROUP BY 1 ORDER BY 1;
+psql:include/cagg_query_common.sql:924: NOTICE: refreshing continuous aggregate "cagg_1_week_offset"
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_week_offset';
+ user_view_schema | user_view_name | bucket_func | bucket_width | bucket_origin | bucket_offset | bucket_timezone | bucket_fixed_width
+------------------+--------------------+--------------------------------------------------------------------------------+--------------+------------------------------+---------------+-----------------+--------------------
+ public | cagg_1_week_offset | public.time_bucket(interval,timestamp with time zone,timestamp with time zone) | @ 7 days | Sun Jan 02 01:00:00 2000 PST | | | t
+(1 row)
+
+-- Compare output
+SELECT * FROM cagg_1_week_offset;
+ week_bucket | max_value
+------------------------------+-----------
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+(2 rows)
+
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+ time_bucket | max
+------------------------------+-----
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+(2 rows)
+
+INSERT INTO temperature values('2030-01-01 05:05:00 PST', 22222);
+INSERT INTO temperature values('2030-01-03 05:05:00 PST', 55555);
+-- Compare real-time functionality
+ALTER MATERIALIZED VIEW cagg_1_hour_offset SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_1_week_offset SET (timescaledb.materialized_only=false);
+SELECT * FROM cagg_1_week_offset;
+ week_bucket | max_value
+------------------------------+-----------
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+ Sun Dec 30 01:00:00 2029 PST | 55555
+(3 rows)
+
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+ time_bucket | max
+------------------------------+-------
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+ Sun Dec 30 01:00:00 2029 PST | 55555
+(3 rows)
+
+-- Test refresh
+CALL refresh_continuous_aggregate('cagg_1_hour_offset', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_1_week_offset', NULL, NULL);
+-- Everything should now be materialized
+ALTER MATERIALIZED VIEW cagg_1_hour_offset SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_1_week_offset SET (timescaledb.materialized_only=false);
+SELECT * FROM cagg_1_week_offset;
+ week_bucket | max_value
+------------------------------+-----------
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+ Sun Dec 30 01:00:00 2029 PST | 55555
+(3 rows)
+
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+ time_bucket | max
+------------------------------+-------
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+ Sun Dec 30 01:00:00 2029 PST | 55555
+(3 rows)
+
+TRUNCATE temperature;
+SELECT * FROM cagg_1_week_offset;
+ week_bucket | max_value
+------------------------------+-----------
+ Sun Jan 30 01:00:00 2000 PST | 5
+ Sun Jan 26 01:00:00 2020 PST | 6
+ Sun Dec 30 01:00:00 2029 PST | 55555
+(3 rows)
+
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+ time_bucket | max
+-------------+-----
+(0 rows)
+
+DROP VIEW caggs_info;
diff --git a/tsl/test/expected/cagg_refresh.out b/tsl/test/expected/cagg_refresh.out
index ee641243b1f..e716f58f15a 100644
--- a/tsl/test/expected/cagg_refresh.out
+++ b/tsl/test/expected/cagg_refresh.out
@@ -1,15 +1,10 @@
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
--- Disable background workers since we are testing manual refresh
-\c :TEST_DBNAME :ROLE_SUPERUSER
-SELECT _timescaledb_functions.stop_background_workers();
- stop_background_workers
-------------------------
- t
-(1 row)
-
-SET ROLE :ROLE_DEFAULT_PERM_USER;
+\ir include/cagg_refresh_common.sql
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
 CREATE TABLE conditions (time timestamptz NOT NULL, device int, temp float);
 SELECT create_hypertable('conditions', 'time');
  create_hypertable
 -------------------------
  (1,public,conditions,t)
 (1 row)
@@ -75,16 +70,16 @@ CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 17:00 PDT', '2020-05
 -- These refreshes will fail since they don't align with the bucket's
 -- time zone
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-04');
-ERROR: refresh window too small
+psql:include/cagg_refresh_common.sql:43: ERROR: refresh window too small
 DETAIL: The refresh window must cover at least one bucket of data.
 HINT: Align the refresh window with the bucket time zone or use at least two buckets.
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 PDT', '2020-05-04 00:00 PDT');
-ERROR: refresh window too small
+psql:include/cagg_refresh_common.sql:44: ERROR: refresh window too small
 DETAIL: The refresh window must cover at least one bucket of data.
 HINT: Align the refresh window with the bucket time zone or use at least two buckets.
 -- Refresh window less than one bucket
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-03 23:59 UTC');
-ERROR: refresh window too small
+psql:include/cagg_refresh_common.sql:47: ERROR: refresh window too small
 DETAIL: The refresh window must cover at least one bucket of data.
 HINT: Align the refresh window with the bucket time zone or use at least two buckets.
 -- Refresh window bigger than one bucket, but failing since it is not
@@ -93,7 +88,7 @@ HINT: Align the refresh window with the bucket time zone or use at least two bu
 -- Refresh window: [----------)
 -- Buckets: [------|------]
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 01:00 UTC', '2020-05-04 08:00 UTC');
-ERROR: refresh window too small
+psql:include/cagg_refresh_common.sql:53: ERROR: refresh window too small
 DETAIL: The refresh window must cover at least one bucket of data.
 HINT: Align the refresh window with the bucket time zone or use at least two buckets.
 \set VERBOSITY terse
@@ -121,14 +116,14 @@ ORDER BY day DESC, device;
 -- Refresh the rest (and try DEBUG output)
 SET client_min_messages TO DEBUG1;
 CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04');
-LOG: statement: CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04');
-DEBUG: hypertable 1 existing watermark >= new invalidation threshold 1588723200000000 1588550400000000
-DEBUG: continuous aggregate refresh (individual invalidation) on "daily_temp" in window [ Thu Apr 30 17:00:00 2020 PDT, Sat May 02 17:00:00 2020 PDT ]
-LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2"
-LOG: inserted 8 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2"
-DEBUG: hypertable 2 existing watermark >= new watermark 1588723200000000 1588723200000000
+psql:include/cagg_refresh_common.sql:65: LOG: statement: CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04');
+psql:include/cagg_refresh_common.sql:65: DEBUG: hypertable 1 existing watermark >= new invalidation threshold 1588723200000000 1588550400000000
+psql:include/cagg_refresh_common.sql:65: DEBUG: continuous aggregate refresh (individual invalidation) on "daily_temp" in window [ Thu Apr 30 17:00:00 2020 PDT, Sat May 02 17:00:00 2020 PDT ]
+psql:include/cagg_refresh_common.sql:65: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2"
+psql:include/cagg_refresh_common.sql:65: LOG: inserted 8 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2"
+psql:include/cagg_refresh_common.sql:65: DEBUG: hypertable 2 existing watermark >= new watermark 1588723200000000 1588723200000000
 RESET client_min_messages;
-LOG: statement: RESET client_min_messages;
+psql:include/cagg_refresh_common.sql:66: LOG: statement: RESET client_min_messages;
 -- Compare the aggregate to the equivalent query on the source table
 SELECT * FROM daily_temp
 ORDER BY day DESC, device;
@@ -186,46 +181,46 @@ ORDER BY 1 DESC,2;
 -- Test unusual, but valid input
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::timestamptz, '2020-05-03'::date);
-NOTICE: continuous aggregate "daily_temp" is already up-to-date
+psql:include/cagg_refresh_common.sql:78: NOTICE: continuous aggregate "daily_temp" is already up-to-date
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::date, '2020-05-03'::date);
-NOTICE: continuous aggregate "daily_temp" is already up-to-date
+psql:include/cagg_refresh_common.sql:79: NOTICE: continuous aggregate "daily_temp" is already up-to-date
 -- Unbounded window forward in time
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', NULL);
-NOTICE: continuous aggregate "daily_temp" is already up-to-date
+psql:include/cagg_refresh_common.sql:82: NOTICE: continuous aggregate "daily_temp" is already up-to-date
 CALL refresh_continuous_aggregate('daily_temp', NULL, NULL);
 -- Unbounded window back in time
 CALL refresh_continuous_aggregate('daily_temp', NULL, '2020-05-01');
-NOTICE: continuous aggregate "daily_temp" is already up-to-date
+psql:include/cagg_refresh_common.sql:86: NOTICE: continuous aggregate "daily_temp" is already up-to-date
 -- Test bad input
 \set ON_ERROR_STOP 0
 -- Bad continuous aggregate name
 CALL refresh_continuous_aggregate(NULL, '2020-05-03', '2020-05-05');
-ERROR: invalid continuous aggregate
+psql:include/cagg_refresh_common.sql:91: ERROR: invalid continuous aggregate
 CALL refresh_continuous_aggregate('xyz', '2020-05-03', '2020-05-05');
-ERROR: relation "xyz" does not exist at character 35
+psql:include/cagg_refresh_common.sql:92: ERROR: relation "xyz" does not exist at character 35
 -- Valid object, but not a continuous aggregate
 CALL refresh_continuous_aggregate('conditions', '2020-05-03', '2020-05-05');
-ERROR: relation "conditions" is not a continuous aggregate
+psql:include/cagg_refresh_common.sql:94: ERROR: relation "conditions" is not a continuous aggregate
 -- Object ID with no object
 CALL refresh_continuous_aggregate(1, '2020-05-03', '2020-05-05');
-ERROR: continuous aggregate does not exist
+psql:include/cagg_refresh_common.sql:96: ERROR: continuous aggregate does not exist
 -- Lacking arguments
 CALL refresh_continuous_aggregate('daily_temp');
-ERROR: procedure refresh_continuous_aggregate(unknown) does not exist at character 6
+psql:include/cagg_refresh_common.sql:98: ERROR: procedure refresh_continuous_aggregate(unknown) does not exist at character 6
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03');
-ERROR: procedure refresh_continuous_aggregate(unknown, unknown) does not exist at character 6
+psql:include/cagg_refresh_common.sql:99: ERROR: procedure refresh_continuous_aggregate(unknown, unknown) does not exist at character 6
 -- Bad time ranges
 CALL refresh_continuous_aggregate('daily_temp', 'xyz', '2020-05-05');
-ERROR: invalid input syntax for type timestamp with time zone: "xyz"
+psql:include/cagg_refresh_common.sql:101: ERROR: invalid input syntax for type timestamp with time zone: "xyz"
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', 'xyz');
-ERROR: invalid input syntax for type timestamp with time zone: "xyz"
+psql:include/cagg_refresh_common.sql:102: ERROR: invalid input syntax for type timestamp with time zone: "xyz"
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-01');
-ERROR: refresh window too small
+psql:include/cagg_refresh_common.sql:103: ERROR: refresh window too small
 -- Bad time input
 CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::text, '2020-05-03'::text);
-ERROR: invalid time argument type "text"
+psql:include/cagg_refresh_common.sql:105: ERROR: invalid time argument type "text"
 CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01');
-ERROR: invalid time argument type "integer"
+psql:include/cagg_refresh_common.sql:106: ERROR: invalid time argument type "integer"
 \set ON_ERROR_STOP 1
 -- Test different time types
 CREATE TABLE conditions_date (time date NOT NULL, device int, temp float);
@@ -272,7 +267,7 @@ AS
 SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp
 FROM conditions_smallint c
 GROUP BY 1,2 WITH NO DATA;
-ERROR: custom time function required on hypertable "conditions_smallint"
+psql:include/cagg_refresh_common.sql:150: ERROR: custom time function required on hypertable "conditions_smallint"
 \set ON_ERROR_STOP 1
 SELECT set_integer_now_func('conditions_smallint', 'smallint_now');
  set_integer_now_func
@@ -427,7 +422,7 @@ AS
 SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
 FROM conditions
 GROUP BY 1,2 WITH DATA;
-NOTICE: refreshing continuous aggregate "weekly_temp_with_data"
+psql:include/cagg_refresh_common.sql:255: NOTICE: refreshing continuous aggregate "weekly_temp_with_data"
 SELECT * FROM weekly_temp_without_data;
  day | device | avg_temp
 -----+--------+----------
 (0 rows)
@@ -449,7 +444,7 @@ SELECT * FROM weekly_temp_with_data ORDER BY 1,2;
 \set ON_ERROR_STOP 0
 -- REFRESH MATERIALIZED VIEW is blocked on continuous aggregates
 REFRESH MATERIALIZED VIEW weekly_temp_without_data;
-ERROR: operation not supported on continuous aggregate
+psql:include/cagg_refresh_common.sql:262: ERROR: operation not supported on continuous aggregate
 -- These should fail since we do not allow refreshing inside a
 -- transaction, not even as part of CREATE MATERIALIZED VIEW.
 DO LANGUAGE PLPGSQL $$ BEGIN
@@ -461,7 +456,7 @@ SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
 FROM conditions
 GROUP BY 1,2 WITH DATA;
 END $$;
-ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function
+psql:include/cagg_refresh_common.sql:274: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function
 BEGIN;
 CREATE MATERIALIZED VIEW weekly_conditions
 WITH (timescaledb.continuous,
@@ -470,7 +465,7 @@ AS
 SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
 FROM conditions
 GROUP BY 1,2 WITH DATA;
-ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block
+psql:include/cagg_refresh_common.sql:283: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block
 COMMIT;
 \set ON_ERROR_STOP 1
 -- This should not fail since we do not refresh the continuous
diff --git a/tsl/test/expected/cagg_refresh_using_merge.out b/tsl/test/expected/cagg_refresh_using_merge.out
new file mode 100644
index 00000000000..b3e3979caa5
--- /dev/null
+++ b/tsl/test/expected/cagg_refresh_using_merge.out
@@ -0,0 +1,492 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+-- Enable MERGE statements for continuous aggregate refresh
+SET timescaledb.enable_merge_on_cagg_refresh TO ON;
+\ir include/cagg_refresh_common.sql
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+CREATE TABLE conditions (time timestamptz NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions', 'time');
+ create_hypertable
+-------------------------
+ (1,public,conditions,t)
+(1 row)
+
+SELECT setseed(.12);
+ setseed
+---------
+
+(1 row)
+
+INSERT INTO conditions
+SELECT t, ceil(abs(timestamp_hash(t::timestamp))%4)::int, abs(timestamp_hash(t::timestamp))%40
+FROM generate_series('2020-05-01', '2020-05-05', '10 minutes'::interval) t;
+-- Show the most recent data
+SELECT * FROM conditions
+ORDER BY time DESC, device
+LIMIT 10;
+ time | device | temp
+------------------------------+--------+------
+ Tue May 05 00:00:00 2020 PDT | 2 | 30
+ Mon May 04 23:50:00 2020 PDT | 2 | 10
+ Mon May 04 23:40:00 2020 PDT | 0 | 20
+ Mon May 04 23:30:00 2020 PDT | 1 | 1
+ Mon May 04 23:20:00 2020 PDT | 2 | 34
+ Mon May 04 23:10:00 2020 PDT | 1 | 37
+ Mon May 04 23:00:00 2020 PDT | 0 | 4
+ Mon May 04 22:50:00 2020 PDT | 2 | 10
+ Mon May 04 22:40:00 2020 PDT | 1 | 37
+ Mon May 04 22:30:00 2020 PDT | 0 | 8
+(10 rows)
+
+CREATE MATERIALIZED VIEW daily_temp
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+-- The continuous aggregate should be empty
+SELECT * FROM daily_temp
+ORDER BY day DESC, device;
+ day | device | avg_temp
+-----+--------+----------
+(0 rows)
+
+-- Refresh one bucket (1 day):
+SHOW timezone;
+ TimeZone
+----------
+ PST8PDT
+(1 row)
+
+-- The refresh of a single bucket must align with the start of the day
+-- in the bucket's time zone (which is UTC, since time_bucket doesn't
+-- support time zone arg)
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-04 00:00 UTC');
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 17:00 PDT', '2020-05-04 17:00 PDT');
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+-- These refreshes will fail since they don't align with the bucket's
+-- time zone
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-04');
+psql:include/cagg_refresh_common.sql:43: ERROR: refresh window too small
+DETAIL: The refresh window must cover at least one bucket of data.
+HINT: Align the refresh window with the bucket time zone or use at least two buckets.
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 PDT', '2020-05-04 00:00 PDT');
+psql:include/cagg_refresh_common.sql:44: ERROR: refresh window too small
+DETAIL: The refresh window must cover at least one bucket of data.
+HINT: Align the refresh window with the bucket time zone or use at least two buckets.
+-- Refresh window less than one bucket
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-03 23:59 UTC');
+psql:include/cagg_refresh_common.sql:47: ERROR: refresh window too small
+DETAIL: The refresh window must cover at least one bucket of data.
+HINT: Align the refresh window with the bucket time zone or use at least two buckets.
+-- Refresh window bigger than one bucket, but failing since it is not
+-- aligned with bucket boundaries so that it covers a full bucket:
+--
+-- Refresh window: [----------)
+-- Buckets: [------|------]
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 01:00 UTC', '2020-05-04 08:00 UTC');
+psql:include/cagg_refresh_common.sql:53: ERROR: refresh window too small
+DETAIL: The refresh window must cover at least one bucket of data.
+HINT: Align the refresh window with the bucket time zone or use at least two buckets.
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
+-- Refresh the most recent few days:
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00');
+SELECT * FROM daily_temp
+ORDER BY day DESC, device;
+ day | device | avg_temp
+------------------------------+--------+------------------
+ Mon May 04 17:00:00 2020 PDT | 0 | 19.3846153846154
+ Mon May 04 17:00:00 2020 PDT | 1 | 16.5555555555556
+ Mon May 04 17:00:00 2020 PDT | 2 | 18.5714285714286
+ Mon May 04 17:00:00 2020 PDT | 3 | 23.5714285714286
+ Sun May 03 17:00:00 2020 PDT | 0 | 15.7647058823529
+ Sun May 03 17:00:00 2020 PDT | 1 | 24.3142857142857
+ Sun May 03 17:00:00 2020 PDT | 2 | 14.8205128205128
+ Sun May 03 17:00:00 2020 PDT | 3 | 18.1111111111111
+ Sat May 02 17:00:00 2020 PDT | 0 | 17
+ Sat May 02 17:00:00 2020 PDT | 1 | 18.75
+ Sat May 02 17:00:00 2020 PDT | 2 | 20
+ Sat May 02 17:00:00 2020 PDT | 3 | 21.5217391304348
+(12 rows)
+
+-- Refresh the rest (and try DEBUG output)
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04');
+psql:include/cagg_refresh_common.sql:65: LOG: statement: CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04');
+psql:include/cagg_refresh_common.sql:65: DEBUG: hypertable 1 existing watermark >= new invalidation threshold 1588723200000000 1588550400000000
+psql:include/cagg_refresh_common.sql:65: DEBUG: continuous aggregate refresh (individual invalidation) on "daily_temp" in window [ Thu Apr 30 17:00:00 2020 PDT, Sat May 02 17:00:00 2020 PDT ]
+psql:include/cagg_refresh_common.sql:65: LOG: merged 8 row(s) into materialization table "_timescaledb_internal._materialized_hypertable_2"
+psql:include/cagg_refresh_common.sql:65: LOG: deleted 0 row(s) from materialization table "_timescaledb_internal._materialized_hypertable_2"
+psql:include/cagg_refresh_common.sql:65: DEBUG: hypertable 2 existing watermark >= new watermark 1588723200000000 1588723200000000
+RESET client_min_messages;
+psql:include/cagg_refresh_common.sql:66: LOG: statement: RESET client_min_messages;
+-- Compare the aggregate to the equivalent query on the source table
+SELECT * FROM daily_temp
+ORDER BY day DESC, device;
+ day | device | avg_temp
+------------------------------+--------+------------------
+ Mon May 04 17:00:00 2020 PDT | 0 | 19.3846153846154
+ Mon May 04 17:00:00 2020 PDT | 1 | 16.5555555555556
+ Mon May 04 17:00:00 2020 PDT | 2 | 18.5714285714286
+ Mon May 04 17:00:00 2020 PDT | 3 | 23.5714285714286
+ Sun May 03 17:00:00 2020 PDT | 0 | 15.7647058823529
+ Sun May 03 17:00:00 2020 PDT | 1 | 24.3142857142857
+ Sun May 03 17:00:00 2020 PDT | 2 | 14.8205128205128
+ Sun May 03 17:00:00 2020 PDT | 3 | 18.1111111111111
+ Sat May 02 17:00:00 2020 PDT | 0 | 17
+ Sat May 02 17:00:00 2020 PDT | 1 | 18.75
+ Sat May 02 17:00:00 2020 PDT | 2 | 20
+ Sat May 02 17:00:00 2020 PDT | 3 | 21.5217391304348
+ Fri May 01 17:00:00 2020 PDT | 0 | 19
+ Fri May 01 17:00:00 2020 PDT | 1 | 15.1463414634146
+ Fri May 01 17:00:00 2020 PDT | 2 | 19.7674418604651
+ Fri May 01 17:00:00 2020 PDT | 3 | 22.25
+ Thu Apr 30 17:00:00 2020 PDT | 0 | 17.6666666666667
+ Thu Apr 30 17:00:00 2020 PDT | 1 | 18.8333333333333
+ Thu Apr 30 17:00:00 2020 PDT | 2 | 16.7586206896552
+ Thu Apr 30 17:00:00 2020 PDT | 3 | 20.76
+(20 rows)
+
+SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2
+ORDER BY 1 DESC,2;
+ day | device | avg_temp
+------------------------------+--------+------------------
+ Mon May 04 17:00:00 2020 PDT | 0 | 19.3846153846154
+ Mon May 04 17:00:00 2020 PDT | 1 | 16.5555555555556
+ Mon May 04 17:00:00 2020 PDT | 2 | 18.5714285714286
+ Mon May 04 17:00:00 2020 PDT | 3 | 23.5714285714286
+ Sun May 03 17:00:00 2020 PDT | 0 | 15.7647058823529
+ Sun May 03 17:00:00 2020 PDT | 1 | 24.3142857142857
+ Sun May 03 17:00:00 2020 PDT | 2 | 14.8205128205128
+ Sun May 03 17:00:00 2020 PDT | 3 | 18.1111111111111
+ Sat May 02 17:00:00 2020 PDT | 0 | 17
+ Sat May 02 17:00:00 2020 PDT | 1 | 18.75
+ Sat May 02 17:00:00 2020 PDT | 2 | 20
+ Sat May 02 17:00:00 2020 PDT | 3 | 21.5217391304348
+ Fri May 01 17:00:00 2020 PDT | 0 | 19
+ Fri May 01 17:00:00 2020 PDT | 1 | 15.1463414634146
+ Fri May 01 17:00:00 2020 PDT | 2 | 19.7674418604651
+ Fri May 01 17:00:00 2020 PDT | 3 | 22.25
+ Thu Apr 30 17:00:00 2020 PDT | 0 | 17.6666666666667
+ Thu Apr 30 17:00:00 2020 PDT | 1 | 18.8333333333333
+ Thu Apr 30 17:00:00 2020 PDT | 2 | 16.7586206896552
+ Thu Apr 30 17:00:00 2020 PDT | 3 | 20.76
+(20 rows)
+
+-- Test unusual, but valid input
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::timestamptz, '2020-05-03'::date);
+psql:include/cagg_refresh_common.sql:78: NOTICE: continuous aggregate "daily_temp" is already up-to-date
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::date, '2020-05-03'::date);
+psql:include/cagg_refresh_common.sql:79: NOTICE: continuous aggregate "daily_temp" is already up-to-date
+-- Unbounded window forward in time
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', NULL);
+psql:include/cagg_refresh_common.sql:82: NOTICE: continuous aggregate "daily_temp" is already up-to-date
+CALL refresh_continuous_aggregate('daily_temp', NULL, NULL);
+-- Unbounded window back in time
+CALL refresh_continuous_aggregate('daily_temp', NULL, '2020-05-01');
+psql:include/cagg_refresh_common.sql:86: NOTICE: continuous aggregate "daily_temp" is already up-to-date
+-- Test bad input
+\set ON_ERROR_STOP 0
+-- Bad continuous aggregate name
+CALL refresh_continuous_aggregate(NULL, '2020-05-03', '2020-05-05');
+psql:include/cagg_refresh_common.sql:91: ERROR: invalid continuous aggregate
+CALL refresh_continuous_aggregate('xyz', '2020-05-03', '2020-05-05');
+psql:include/cagg_refresh_common.sql:92: ERROR: relation "xyz" does not exist at character 35
+-- Valid object, but not a continuous aggregate
+CALL refresh_continuous_aggregate('conditions', '2020-05-03', '2020-05-05');
+psql:include/cagg_refresh_common.sql:94: ERROR: relation "conditions" is not a continuous aggregate
+-- Object ID with no object
+CALL refresh_continuous_aggregate(1, '2020-05-03', '2020-05-05');
+psql:include/cagg_refresh_common.sql:96: ERROR: continuous aggregate does not exist
+-- Lacking arguments
+CALL refresh_continuous_aggregate('daily_temp');
+psql:include/cagg_refresh_common.sql:98: ERROR: procedure refresh_continuous_aggregate(unknown) does not exist at character 6
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03');
+psql:include/cagg_refresh_common.sql:99: ERROR: procedure refresh_continuous_aggregate(unknown, unknown) does not exist at character 6
+-- Bad time ranges
+CALL refresh_continuous_aggregate('daily_temp', 'xyz', '2020-05-05');
+psql:include/cagg_refresh_common.sql:101: ERROR: invalid input syntax for type timestamp with time zone: "xyz"
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', 'xyz');
+psql:include/cagg_refresh_common.sql:102: ERROR: invalid input syntax for type timestamp with time zone: "xyz"
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-01');
+psql:include/cagg_refresh_common.sql:103: ERROR: refresh window too small
+-- Bad time input
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::text, '2020-05-03'::text);
+psql:include/cagg_refresh_common.sql:105: ERROR: invalid time argument type "text"
+CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01');
+psql:include/cagg_refresh_common.sql:106: ERROR: invalid time argument type "integer"
+\set ON_ERROR_STOP 1
+-- Test different time types
+CREATE TABLE conditions_date (time date NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_date', 'time');
+ create_hypertable
+------------------------------
+ (3,public,conditions_date,t)
+(1 row)
+
+CREATE MATERIALIZED VIEW daily_temp_date
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions_date
+GROUP BY 1,2 WITH NO DATA;
+CALL refresh_continuous_aggregate('daily_temp_date', '2020-05-01', '2020-05-03');
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('daily_temp_date', NULL, NULL);
+-- Test smallint-based continuous aggregate
+CREATE TABLE conditions_smallint (time smallint NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_smallint', 'time', chunk_time_interval => 20);
+ create_hypertable
+----------------------------------
+ (5,public,conditions_smallint,t)
+(1 row)
+
+INSERT INTO conditions_smallint
+SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::smallint, abs(timestamp_hash(to_timestamp(t)::timestamp))%40
+FROM generate_series(1, 100, 1) t;
+CREATE OR REPLACE FUNCTION smallint_now()
+RETURNS smallint LANGUAGE SQL STABLE AS
+$$
+ SELECT coalesce(max(time), 0)::smallint
+ FROM conditions_smallint
+$$;
+\set ON_ERROR_STOP 0
+-- First try to create an integer-based continuous aggregate without
+-- a now function. This should not be allowed.
+CREATE MATERIALIZED VIEW cond_20_smallint
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_smallint c
+GROUP BY 1,2 WITH NO DATA;
+psql:include/cagg_refresh_common.sql:150: ERROR: custom time function required on hypertable "conditions_smallint"
+\set ON_ERROR_STOP 1
+SELECT set_integer_now_func('conditions_smallint', 'smallint_now');
+ set_integer_now_func
+----------------------
+
+(1 row)
+
+CREATE MATERIALIZED VIEW cond_20_smallint
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_smallint c
+GROUP BY 1,2 WITH NO DATA;
+CALL refresh_continuous_aggregate('cond_20_smallint', 0::smallint, 70::smallint);
+SELECT * FROM cond_20_smallint
+ORDER BY 1,2;
+ bucket | device | avg_temp
+--------+--------+------------------
+ 0 | 0 | 6
+ 0 | 1 | 19
+ 0 | 2 | 14.5
+ 0 | 3 | 21.4
+ 20 | 0 | 15
+ 20 | 1 | 16
+ 20 | 2 | 23.3333333333333
+ 20 | 3 | 13.6666666666667
+ 40 | 0 | 21
+ 40 | 1 | 19.4
+ 40 | 2 | 22
+ 40 | 3 | 21.4
+(12 rows)
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('cond_20_smallint', NULL, NULL);
+-- Test int-based continuous aggregate
+CREATE TABLE conditions_int (time int NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_int', 'time', chunk_time_interval => 20);
+ create_hypertable
+-----------------------------
+ (7,public,conditions_int,t)
+(1 row)
+
+INSERT INTO conditions_int
+SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::int, abs(timestamp_hash(to_timestamp(t)::timestamp))%40
+FROM generate_series(1, 100, 1) t;
+CREATE OR REPLACE FUNCTION int_now()
+RETURNS int LANGUAGE SQL STABLE AS
+$$
+ SELECT coalesce(max(time), 0)
+ FROM conditions_int
+$$;
+SELECT set_integer_now_func('conditions_int', 'int_now');
+ set_integer_now_func
+----------------------
+
+(1 row)
+
+CREATE MATERIALIZED VIEW cond_20_int
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(INT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_int
+GROUP BY 1,2 WITH NO DATA;
+CALL refresh_continuous_aggregate('cond_20_int', 0, 65);
+SELECT * FROM cond_20_int
+ORDER BY 1,2;
+ bucket | device | avg_temp
+--------+--------+------------------
+ 0 | 0 | 6
+ 0 | 1 | 19
+ 0 | 2 | 14.5
+ 0 | 3 | 21.4
+ 20 | 0 | 15
+ 20 | 1 | 16
+ 20 | 2 | 23.3333333333333
+ 20 | 3 | 13.6666666666667
+ 40 | 0 | 21
+ 40 | 1 | 19.4
+ 40 | 2 | 22
+ 40 | 3 | 21.4
+(12 rows)
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('cond_20_int', NULL, NULL);
+-- Test bigint-based continuous aggregate
+CREATE TABLE conditions_bigint (time bigint NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_bigint', 'time', chunk_time_interval => 20);
+ create_hypertable
+--------------------------------
+ (9,public,conditions_bigint,t)
+(1 row)
+
+INSERT INTO conditions_bigint
+SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::bigint, abs(timestamp_hash(to_timestamp(t)::timestamp))%40
+FROM generate_series(1, 100, 1) t;
+CREATE OR REPLACE FUNCTION bigint_now()
+RETURNS bigint LANGUAGE SQL STABLE AS
+$$
+ SELECT coalesce(max(time), 0)::bigint
+ FROM conditions_bigint
+$$;
+SELECT set_integer_now_func('conditions_bigint', 'bigint_now');
+ set_integer_now_func
+----------------------
+
+(1 row)
+
+CREATE MATERIALIZED VIEW cond_20_bigint
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(BIGINT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_bigint
+GROUP BY 1,2 WITH NO DATA;
+CALL refresh_continuous_aggregate('cond_20_bigint', 0, 75);
+SELECT * FROM cond_20_bigint
+ORDER BY 1,2;
+ bucket | device | avg_temp
+--------+--------+------------------
+ 0 | 0 | 6
+ 0 | 1 | 19
+ 0 | 2 | 14.5
+ 0 | 3 | 21.4
+ 20 | 0 | 15
+ 20 | 1 | 16
+ 20 | 2 | 23.3333333333333
+ 20 | 3 | 13.6666666666667
+ 40 | 0 | 21
+ 40 | 1 | 19.4
+ 40 | 2 | 22
+ 40 | 3 | 21.4
+(12 rows)
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('cond_20_bigint', NULL, NULL);
+-- Test that WITH NO DATA and WITH DATA works (we use whatever is the
+-- default for Postgres, so we do not need to have a test for the
+-- default).
+CREATE MATERIALIZED VIEW weekly_temp_without_data
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+CREATE MATERIALIZED VIEW weekly_temp_with_data
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH DATA;
+psql:include/cagg_refresh_common.sql:255: NOTICE: refreshing continuous aggregate "weekly_temp_with_data"
+SELECT * FROM weekly_temp_without_data;
+ day | device | avg_temp
+-----+--------+----------
+(0 rows)
+
+SELECT * FROM weekly_temp_with_data ORDER BY 1,2;
+ day | device | avg_temp
+------------------------------+--------+------------------
+ Sun Apr 26 17:00:00 2020 PDT | 0 | 17.8181818181818
+ Sun Apr 26 17:00:00 2020 PDT | 1 | 17.2474226804124
+ Sun Apr 26 17:00:00 2020 PDT | 2 | 18.9803921568627
+ Sun Apr 26 17:00:00 2020 PDT | 3 | 21.5631067961165
+ Sun May 03 17:00:00 2020 PDT | 0 | 16.7659574468085
+ Sun May 03 17:00:00 2020 PDT | 1 | 22.7272727272727
+ Sun May 03 17:00:00 2020 PDT | 2 | 15.811320754717
+ Sun May 03 17:00:00 2020 PDT | 3 | 19
+(8 rows)
+
+\set ON_ERROR_STOP 0
+-- REFRESH MATERIALIZED VIEW is blocked on continuous aggregates
+REFRESH MATERIALIZED VIEW weekly_temp_without_data;
+psql:include/cagg_refresh_common.sql:262: ERROR: operation not supported on continuous aggregate
+-- These should fail since we do not allow refreshing inside a
+-- transaction, not even as part of CREATE MATERIALIZED VIEW.
+DO LANGUAGE PLPGSQL $$ BEGIN
+CREATE MATERIALIZED VIEW weekly_conditions
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH DATA;
+END $$;
+psql:include/cagg_refresh_common.sql:274: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot be executed from a function
+BEGIN;
+CREATE MATERIALIZED VIEW weekly_conditions
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH DATA;
+psql:include/cagg_refresh_common.sql:283: ERROR: CREATE MATERIALIZED VIEW ... WITH DATA cannot run inside a transaction block
+COMMIT;
+\set ON_ERROR_STOP 1
+-- This should not fail since we do not refresh the continuous
+-- aggregate.
+DO LANGUAGE PLPGSQL $$ BEGIN
+CREATE MATERIALIZED VIEW weekly_conditions_1
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+END $$;
+BEGIN;
+CREATE MATERIALIZED VIEW weekly_conditions_2
+WITH (timescaledb.continuous,
+ timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+COMMIT;
diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt
index a0e4d9c2a4f..79c29c4ab94 100644
--- a/tsl/test/sql/CMakeLists.txt
+++ b/tsl/test/sql/CMakeLists.txt
@@ -107,7 +107,8 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "15"))
   if(CMAKE_BUILD_TYPE MATCHES Debug)
     list(APPEND TEST_FILES bgw_scheduler_control.sql)
   endif()
-  list(APPEND TEST_FILES merge_compress.sql)
+  list(APPEND TEST_FILES merge_compress.sql cagg_query_using_merge.sql
+       cagg_refresh_using_merge.sql)
 endif()
 
 if((${PG_VERSION_MAJOR} GREATER_EQUAL "17"))
diff --git a/tsl/test/sql/cagg_query.sql b/tsl/test/sql/cagg_query.sql
index dd27e12e039..41103e402b9 100644
--- a/tsl/test/sql/cagg_query.sql
+++ b/tsl/test/sql/cagg_query.sql
@@ -2,957 +2,11 @@
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
-\set TEST_BASE_NAME cagg_query
-SELECT
- format('%s/results/%s_results_view.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW",
- format('%s/results/%s_results_view_hashagg.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW_HASHAGG",
- format('%s/results/%s_results_table.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_TABLE"
-\gset
-SELECT format('\! diff %s %s', :'TEST_RESULTS_VIEW', :'TEST_RESULTS_TABLE') as "DIFF_CMD",
- format('\! diff %s %s', :'TEST_RESULTS_VIEW_HASHAGG', :'TEST_RESULTS_TABLE') as "DIFF_CMD2"
-\gset
-
-
-\set EXPLAIN 'EXPLAIN (VERBOSE, COSTS OFF)'
-
-SET client_min_messages TO NOTICE;
-
-CREATE TABLE conditions (
- timec TIMESTAMPTZ NOT NULL,
- location TEXT NOT NULL,
- temperature DOUBLE PRECISION NULL,
- humidity DOUBLE PRECISION NULL
- );
-
-select table_name from create_hypertable( 'conditions', 'timec');
-
-insert into conditions values ( '2018-01-01 09:20:00-08', 'SFO', 55, 45);
-insert into conditions values ( '2018-01-02 09:30:00-08', 'por', 100, 100);
-insert into conditions values ( '2018-01-02 09:20:00-08', 'SFO', 65, 45);
-insert into conditions values ( '2018-01-02 09:10:00-08', 'NYC', 65, 45);
-insert into conditions values ( '2018-11-01 09:20:00-08', 'NYC', 45, 30);
-insert into conditions values ( '2018-11-01 10:40:00-08', 'NYC', 55, 35);
-insert into conditions values ( '2018-11-01 11:50:00-08', 'NYC', 65, 40);
-insert into conditions values ( '2018-11-01 12:10:00-08', 'NYC', 75, 45);
-insert into conditions values ( '2018-11-01 13:10:00-08', 'NYC', 85, 50);
-insert into conditions values ( '2018-11-02 09:20:00-08', 'NYC', 10, 10);
-insert into conditions values ( '2018-11-02 10:30:00-08', 'NYC', 20, 15);
-insert into conditions values ( '2018-11-02 11:40:00-08', 'NYC', null, null);
-insert into conditions values ( '2018-11-03 09:50:00-08', 'NYC', null, null);
-
-create table location_tab( locid integer, locname text );
-insert into location_tab values( 1, 'SFO');
-insert into location_tab values( 2, 'NYC');
-insert into location_tab values( 3, 'por');
-
-create materialized view mat_m1( location, timec, minl, sumt , sumh)
-WITH (timescaledb.continuous, timescaledb.materialized_only=false)
-as
-select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity)
-from conditions
-group by time_bucket('1day', timec), location WITH NO DATA;
-
---compute time_bucketted max+bucket_width for the materialized view
-SELECT time_bucket('1day' , q.timeval+ '1day'::interval)
-FROM ( select max(timec)as timeval from conditions ) as q;
-CALL refresh_continuous_aggregate('mat_m1', NULL, NULL);
-
---test first/last
-create materialized view mat_m2(location, timec, firsth, lasth, maxtemp, mintemp)
-WITH (timescaledb.continuous, timescaledb.materialized_only=false)
-as
-select location, time_bucket('1day', timec), first(humidity, timec), last(humidity, timec), max(temperature), min(temperature)
-from conditions
-group by time_bucket('1day', timec), location WITH NO DATA;
---time that refresh assumes as now() for repeatability
-SELECT time_bucket('1day' , q.timeval+ '1day'::interval)
-FROM ( select max(timec)as timeval from conditions ) as q;
-CALL refresh_continuous_aggregate('mat_m2', NULL, NULL);
-
---normal view --
-create or replace view regview( location, timec, minl, sumt , sumh)
-as
-select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity)
-from conditions
-group by location, time_bucket('1day', timec);
-
-set enable_hashagg = false;
-
--- NO pushdown cases ---
---when we have addl. attrs in order by that are not in the
--- group by, we will still need a sort
-:EXPLAIN
-select * from mat_m1 order by sumh, sumt, minl, timec ;
-:EXPLAIN
-select * from regview order by timec desc;
-
--- PUSHDOWN cases --
--- all group by elts in order by , reorder group by elts to match
--- group by order
--- This should prevent an additional sort after GroupAggregate
-:EXPLAIN
-select * from mat_m1 order by timec desc, location;
-
-:EXPLAIN
-select * from mat_m1 order by location, timec desc;
-
-:EXPLAIN
-select * from mat_m1 order by location, timec asc;
-:EXPLAIN
-select * from mat_m1 where timec > '2018-10-01' order by timec desc;
--- outer sort is used by mat_m1 for grouping. But doesn't avoid a sort after the join ---
-:EXPLAIN
-select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc;
-
-:EXPLAIN
-select * from mat_m2 where timec > '2018-10-01' order by timec desc;
-
-:EXPLAIN
-select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc ) as q limit 1;
-
-:EXPLAIN
-select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc , location asc nulls first) as q limit 1;
-
---plans with CTE
-:EXPLAIN
-with m1 as (
-Select * from mat_m2 where timec > '2018-10-01' order by timec desc )
-select * from m1;
-
--- should reorder mat_m1 group by only based on mat_m1 order-by
-:EXPLAIN
-select * from mat_m1, mat_m2 where mat_m1.timec > '2018-10-01' and mat_m1.timec = mat_m2.timec order by mat_m1.timec desc;
---should reorder only for mat_m1.
-:EXPLAIN
-select * from mat_m1, regview where mat_m1.timec > '2018-10-01' and mat_m1.timec = regview.timec order by mat_m1.timec desc;
-
-select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc;
-
-\set ECHO none
-SET client_min_messages TO error;
-\o :TEST_RESULTS_VIEW
-select * from mat_m1 order by timec desc, location;
-select * from mat_m1 order by location, timec desc;
-select * from mat_m1 order by location, timec asc;
-select * from mat_m1 where timec > '2018-10-01' order by timec desc;
-select * from mat_m2 where timec > '2018-10-01' order by timec desc;
-\o
-RESET client_min_messages;
-\set ECHO all
-
---- Run the same queries with hash agg enabled now
-set enable_hashagg = true;
-\set ECHO none
-SET client_min_messages TO error;
-\o :TEST_RESULTS_VIEW_HASHAGG
-select * from mat_m1 order by timec desc, location;
-select * from mat_m1 order by location, timec desc;
-select * from mat_m1 order by location, timec asc;
-select * from mat_m1 where timec > '2018-10-01' order by timec desc;
-select * from mat_m2 where timec > '2018-10-01' order by timec desc;
-\o
-RESET client_min_messages;
-\set ECHO all
-
--- Run the queries directly on the table now
-set enable_hashagg = true;
-\set ECHO none
-SET client_min_messages TO error;
-\o :TEST_RESULTS_TABLE
-SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions group by time_bucket('1day', timec) , location
-order by timec desc, location;
-SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions group by time_bucket('1day', timec) , location
-order by location, timec desc;
-SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions group by time_bucket('1day', timec) , location
-order by location, timec asc;
-select * from (SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions
-group by time_bucket('1day', timec) , location ) as q
-where timec > '2018-10-01' order by timec desc;
---comparison for mat_m2 queries
-select * from (
-select location, time_bucket('1day', timec) as timec, first(humidity, timec) firsth, last(humidity, timec) lasth, max(temperature) maxtemp, min(temperature) mintemp
-from conditions
-group by time_bucket('1day', timec), location) as q
-where timec > '2018-10-01' order by timec desc limit 10;
-\o
-RESET client_min_messages;
-\set ECHO all
-
--- diff results view select and table select
-:DIFF_CMD
-:DIFF_CMD2
-
---check if the guc works , reordering will not work
-set timescaledb.enable_cagg_reorder_groupby = false;
-set enable_hashagg = false;
-:EXPLAIN
-select * from mat_m1 order by timec desc, location;
-
------------------------------------------------------------------------
--- Test the cagg_watermark function. The watermark gives the point
--- where to UNION raw and materialized data in real-time
--- aggregation. Specifically, test that the watermark caching works as
--- expected.
------------------------------------------------------------------------
-
--- Insert some more data so that there is something to UNION in
--- real-time aggregation.
-
-insert into conditions values ( '2018-12-02 20:10:00-08', 'SFO', 55, 45);
-insert into conditions values ( '2018-12-02 21:20:00-08', 'SFO', 65, 45);
-insert into conditions values ( '2018-12-02 20:30:00-08', 'NYC', 65, 45);
-insert into conditions values ( '2018-12-02 21:50:00-08', 'NYC', 45, 30);
-
--- Test join of two caggs. Joining two caggs will force the cache to
--- reset every time the watermark function is invoked on a different
--- cagg in the same query.
-SELECT mat_hypertable_id AS mat_id,
- raw_hypertable_id AS raw_id,
- schema_name AS mat_schema,
- table_name AS mat_name,
- format('%I.%I', schema_name, table_name) AS mat_table
-FROM _timescaledb_catalog.continuous_agg ca, _timescaledb_catalog.hypertable h
-WHERE user_view_name='mat_m1'
-AND h.id = ca.mat_hypertable_id \gset
-
-BEGIN;
-
--- Query without join
-SELECT m1.location, m1.timec, sumt, sumh
-FROM mat_m1 m1
-ORDER BY m1.location COLLATE "C", m1.timec DESC
-LIMIT 10;
-
--- Query that joins two caggs. This should force the watermark cache
--- to reset when the materialized hypertable ID changes. A hash join
--- could potentially read all values from mat_m1 then all values from
--- mat_m2. This would be the optimal situation for cagg_watermark
--- caching. We want to avoid it in tests to see that caching doesn't
--- do anything wrong in worse situations (e.g., a nested loop join).
-SET enable_hashjoin=false; - -SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp -FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 -ON (m1.location = m2.location -AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C", m1.timec DESC -LIMIT 10; - --- Show the current watermark -SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); - --- The watermark should, in this case, be the same as the invalidation --- threshold -SELECT _timescaledb_functions.to_timestamp(watermark) -FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold -WHERE hypertable_id = :raw_id; - --- The watermark is the end of materialization (end of last bucket) --- while the MAX is the start of the last bucket -SELECT max(timec) FROM :mat_table; - --- Drop the most recent chunk -SELECT chunk_name, range_start, range_end -FROM timescaledb_information.chunks -WHERE hypertable_name = :'mat_name'; - -SELECT drop_chunks('mat_m1', newer_than=>'2018-01-01'::timestamptz); - -SELECT chunk_name, range_start, range_end -FROM timescaledb_information.chunks -WHERE hypertable_name = :'mat_name'; - --- The watermark should be updated to reflect the dropped data (i.e., --- the cache should be reset) -SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); - --- Since we removed the last chunk, the invalidation threshold doesn't --- move back, while the watermark does. -SELECT _timescaledb_functions.to_timestamp(watermark) -FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold -WHERE hypertable_id = :raw_id; - --- Compare the new watermark to the MAX time in the table -SELECT max(timec) FROM :mat_table; - --- Try a subtransaction -SAVEPOINT clear_cagg; - -SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp -FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 -ON (m1.location = m2.location -AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C", m1.timec DESC -LIMIT 10; - -ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); - -SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp -FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 -ON (m1.location = m2.location -AND m1.timec = m2.timec) -ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, - lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST -LIMIT 10; - -ROLLBACK; - ------ --- Tests with time_bucket and offset/origin ------ -CREATE TABLE temperature ( - time timestamptz NOT NULL, - value float -); - -SELECT create_hypertable('temperature', 'time'); - -INSERT INTO temperature VALUES ('2000-01-01 01:00:00'::timestamptz, 5); - -CREATE TABLE temperature_wo_tz ( - time timestamp NOT NULL, - value float -); - -SELECT create_hypertable('temperature_wo_tz', 'time'); - -INSERT INTO temperature_wo_tz VALUES ('2000-01-01 01:00:00'::timestamp, 5); - -CREATE TABLE temperature_date ( - time date NOT NULL, - value float -); - -SELECT create_hypertable('temperature_date', 'time'); - -INSERT INTO temperature_date VALUES ('2000-01-01 01:00:00'::timestamp, 5); - --- Integer based tables -CREATE TABLE table_smallint ( - time smallint, - data smallint -); - -CREATE TABLE table_int ( - time int, - data int -); - -CREATE TABLE table_bigint ( - time bigint, - data bigint -); - -SELECT create_hypertable('table_smallint', 'time', chunk_time_interval => 10); -SELECT create_hypertable('table_int', 'time', chunk_time_interval => 10); -SELECT create_hypertable('table_bigint', 'time', chunk_time_interval => 10); - -CREATE OR REPLACE 
FUNCTION integer_now_smallint() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_smallint $$; -CREATE OR REPLACE FUNCTION integer_now_int() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_int $$; -CREATE OR REPLACE FUNCTION integer_now_bigint() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_bigint $$; - -SELECT set_integer_now_func('table_smallint', 'integer_now_smallint'); -SELECT set_integer_now_func('table_int', 'integer_now_int'); -SELECT set_integer_now_func('table_bigint', 'integer_now_bigint'); - -INSERT INTO table_smallint VALUES(1,2); -INSERT INTO table_int VALUES(1,2); -INSERT INTO table_bigint VALUES(1,2); - -CREATE VIEW caggs_info AS -SELECT user_view_schema, user_view_name, bucket_func, bucket_width, bucket_origin, bucket_offset, bucket_timezone, bucket_fixed_width -FROM _timescaledb_catalog.continuous_aggs_bucket_function NATURAL JOIN _timescaledb_catalog.continuous_agg; - ---- --- Tests with CAgg creation ---- -CREATE MATERIALIZED VIEW cagg_4_hours - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours'; -DROP MATERIALIZED VIEW cagg_4_hours; - -CREATE MATERIALIZED VIEW cagg_4_hours_offset - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, '30m'::interval), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset'; -DROP MATERIALIZED VIEW cagg_4_hours_offset; - -CREATE MATERIALIZED VIEW cagg_4_hours_offset2 - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset2'; -DROP MATERIALIZED VIEW cagg_4_hours_offset2; - --- Variable buckets (timezone is provided) with offset -CREATE MATERIALIZED VIEW cagg_4_hours_offset_ts - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, timezone=>'UTC'), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_ts'; -DROP MATERIALIZED VIEW cagg_4_hours_offset_ts; - -CREATE MATERIALIZED VIEW cagg_4_hours_origin - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin'; -DROP MATERIALIZED VIEW cagg_4_hours_origin; - --- Using named parameter -CREATE MATERIALIZED VIEW cagg_4_hours_origin2 - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin2'; -DROP MATERIALIZED VIEW cagg_4_hours_origin2; - --- Variable buckets (timezone is provided) with origin -CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz, 
timezone=>'UTC'), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts'; -DROP MATERIALIZED VIEW cagg_4_hours_origin_ts; - --- Without named parameter -CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts2 - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, 'UTC', '2000-01-01 01:00:00 PST'::timestamptz), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts2'; -DROP MATERIALIZED VIEW cagg_4_hours_origin_ts2; - --- Timestamp based CAggs -CREATE MATERIALIZED VIEW cagg_4_hours_wo_tz - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time), max(value) - FROM temperature_wo_tz - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_wo_tz'; - -CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00'::timestamp), max(value) - FROM temperature_wo_tz - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz'; -DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz; - --- Variable buckets (timezone is provided) with origin -CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2 - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00'::timestamp), max(value) - FROM temperature_wo_tz - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz2'; -DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2; - -CREATE MATERIALIZED VIEW cagg_4_hours_offset_wo_tz - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) - FROM temperature_wo_tz - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_wo_tz'; -DROP MATERIALIZED VIEW cagg_4_hours_offset_wo_tz; -DROP MATERIALIZED VIEW cagg_4_hours_wo_tz; - --- Date based CAggs -CREATE MATERIALIZED VIEW cagg_4_hours_date - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 days', time), max(value) - FROM temperature_date - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date'; -DROP MATERIALIZED VIEW cagg_4_hours_date; - -CREATE MATERIALIZED VIEW cagg_4_hours_date_origin - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 days', time, '2000-01-01'::date), max(value) - FROM temperature_date - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin'; -DROP MATERIALIZED VIEW cagg_4_hours_date_origin; - -CREATE MATERIALIZED VIEW cagg_4_hours_date_origin2 - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 days', time, origin=>'2000-01-01'::date), max(value) - FROM temperature_date - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin2'; -DROP MATERIALIZED VIEW cagg_4_hours_date_origin2; - -CREATE MATERIALIZED VIEW cagg_4_hours_date_offset - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 days', time, "offset"=>'30m'::interval), max(value) - FROM 
temperature_date - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_offset'; -DROP MATERIALIZED VIEW cagg_4_hours_date_offset; - --- Integer based CAggs -CREATE MATERIALIZED VIEW cagg_smallint - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time), SUM(data) as value - FROM table_smallint - GROUP BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint'; -DROP MATERIALIZED VIEW cagg_smallint; - -CREATE MATERIALIZED VIEW cagg_smallint_offset - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time, "offset"=>1::smallint), SUM(data) as value - FROM table_smallint - GROUP BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint_offset'; -DROP MATERIALIZED VIEW cagg_smallint_offset; - -CREATE MATERIALIZED VIEW cagg_int - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time), SUM(data) as value - FROM table_int - GROUP BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int'; -DROP MATERIALIZED VIEW cagg_int; - -CREATE MATERIALIZED VIEW cagg_int_offset - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time, "offset"=>1::int), SUM(data) as value - FROM table_int - GROUP BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int_offset'; -DROP MATERIALIZED VIEW cagg_int_offset; - -CREATE MATERIALIZED VIEW cagg_bigint - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time), SUM(data) as value - FROM table_bigint - GROUP BY 1 WITH NO DATA; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint'; -DROP MATERIALIZED VIEW cagg_bigint; - -CREATE MATERIALIZED VIEW cagg_bigint_offset - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time, "offset"=>1::bigint), SUM(data) as value - FROM table_bigint - GROUP BY 1 WITH NO DATA; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset'; -DROP MATERIALIZED VIEW cagg_bigint_offset; - --- Without named parameter -CREATE MATERIALIZED VIEW cagg_bigint_offset2 - WITH (timescaledb.continuous, timescaledb.materialized_only=true) - AS SELECT time_bucket('2', time, 1::bigint), SUM(data) as value - FROM table_bigint - GROUP BY 1 WITH NO DATA; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset2'; - --- mess with the bucket_func signature to make sure it will raise an exception +-- Connect as superuser to use SET ROLE later \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER -\set ON_ERROR_STOP 0 -BEGIN; -UPDATE _timescaledb_catalog.continuous_aggs_bucket_function SET bucket_func = 'func_does_not_exist()'; --- should error because function does not exist -CALL refresh_continuous_aggregate('cagg_bigint_offset2', NULL, NULL); -ROLLBACK; -\set ON_ERROR_STOP 1 -\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER - -DROP MATERIALIZED VIEW cagg_bigint_offset2; - --- Test invalid bucket definitions -\set ON_ERROR_STOP 0 --- Offset and origin at the same time is not allowed (function does not exists) -CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; - --- Offset and origin at the same time is not allowed (function does exists but invalid parameter combination) 
-CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; -\set ON_ERROR_STOP 1 - ---- --- Tests with CAgg processing ---- - --- Check used timezone -SHOW timezone; - --- Populate it -INSERT INTO temperature - SELECT time, 5 - FROM generate_series('2000-01-01 01:00:00 PST'::timestamptz, - '2000-01-01 23:59:59 PST','1m') time; - -INSERT INTO temperature - SELECT time, 6 - FROM generate_series('2020-01-01 00:00:00 PST'::timestamptz, - '2020-01-01 23:59:59 PST','1m') time; - --- Create CAggs -CREATE MATERIALIZED VIEW cagg_4_hours - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; - -CREATE MATERIALIZED VIEW cagg_4_hours_offset - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, '30m'::interval), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; - --- Align origin with first value -CREATE MATERIALIZED VIEW cagg_4_hours_origin - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; - --- Query the CAggs and check that all buckets are materialized -SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_4_hours; -ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours; - -SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_4_hours_offset; -ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours_offset; - -SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_4_hours_origin; -ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours_origin; - --- Update the last bucket and re-materialize -INSERT INTO temperature values('2020-01-01 23:55:00 PST', 10); - -CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); - -SELECT * FROM cagg_4_hours; -SELECT * FROM cagg_4_hours_offset; -SELECT * FROM cagg_4_hours_origin; - --- Check the real-time functionality -ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=false); -ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=false); -ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=false); - --- Check watermarks -SELECT continuous_agg.user_view_name, continuous_aggs_watermark.watermark, _timescaledb_functions.to_timestamp(watermark) - FROM _timescaledb_catalog.continuous_aggs_watermark - JOIN _timescaledb_catalog.continuous_agg USING (mat_hypertable_id) -WHERE user_view_name LIKE 'cagg_4_hours%' -ORDER BY mat_hypertable_id, watermark; - --- Insert new data -INSERT INTO temperature values('2020-01-02 00:10:00 PST', 2222); -INSERT INTO temperature values('2020-01-02 05:35:00 PST', 5555); 
-INSERT INTO temperature values('2020-01-02 09:05:00 PST', 8888); - --- Watermark is at Thu Jan 02 00:00:00 2020 PST - all inserted tuples should be seen -SELECT * FROM cagg_4_hours; - --- Watermark is at Thu Jan 02 00:30:00 2020 PST - only two inserted tuples should be seen -SELECT * FROM cagg_4_hours_offset; - --- Watermark is at Thu Jan 02 01:00:00 2020 PST - only two inserted tuples should be seen -SELECT * FROM cagg_4_hours_origin; - --- Update materialized data -SET client_min_messages TO DEBUG1; -CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); -RESET client_min_messages; - --- Query the CAggs and check that all buckets are materialized -SELECT * FROM cagg_4_hours; -ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours; -SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -SELECT * FROM cagg_4_hours_offset; -ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours_offset; -SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -SELECT * FROM cagg_4_hours_origin; -ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours_origin; -SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - --- Test invalidations -TRUNCATE temperature; -CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); - -INSERT INTO temperature - SELECT time, 5 - FROM generate_series('2000-01-01 01:00:00 PST'::timestamptz, - '2000-01-01 23:59:59 PST','1m') time; - -INSERT INTO temperature - SELECT time, 6 - FROM generate_series('2020-01-01 00:00:00 PST'::timestamptz, - '2020-01-01 23:59:59 PST','1m') time; - -INSERT INTO temperature values('2020-01-02 01:05:00+01', 2222); -INSERT INTO temperature values('2020-01-02 01:35:00+01', 5555); -INSERT INTO temperature values('2020-01-02 05:05:00+01', 8888); - -SET client_min_messages TO DEBUG1; -CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL); -RESET client_min_messages; - -ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours; -ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=false); -SELECT * FROM cagg_4_hours; -SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours_offset; -ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=false); -SELECT * FROM cagg_4_hours_offset; -SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true); -SELECT * FROM cagg_4_hours_origin; -ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=false); -SELECT * FROM cagg_4_hours_origin; -SELECT time_bucket('4 hour', time, 
'2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - ---- Test with variable width buckets (use February, since hourly origins are not supported with variable sized buckets) -TRUNCATE temperature; -INSERT INTO temperature - SELECT time, 5 - FROM generate_series('2000-02-01 01:00:00 PST'::timestamptz, - '2000-02-01 23:59:59 PST','1m') time; -INSERT INTO temperature - SELECT time, 6 - FROM generate_series('2020-02-01 01:00:00 PST'::timestamptz, - '2020-02-01 23:59:59 PST','1m') time; +-- Run tests with default role +SET ROLE :ROLE_DEFAULT_PERM_USER; -SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3; - -CREATE MATERIALIZED VIEW cagg_1_year - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS - SELECT time_bucket('1 year', time), max(value) - FROM temperature - GROUP BY 1 ORDER BY 1; - -SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3; - ---- --- Tests with integer based hypertables ---- -TRUNCATE table_int; - -INSERT INTO table_int - SELECT time, 5 - FROM generate_series(-50, 50) time; - -CREATE MATERIALIZED VIEW cagg_int - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS SELECT time_bucket('10', time), SUM(data) as value - FROM table_int - GROUP BY 1 ORDER BY 1; - -CREATE MATERIALIZED VIEW cagg_int_offset - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS SELECT time_bucket('10', time, "offset"=>5), SUM(data) as value - FROM table_int - GROUP BY 1 ORDER BY 1; - --- Compare bucketing results -SELECT time_bucket('10', time), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_int; - -SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_int_offset; - --- Update table -INSERT INTO table_int VALUES(51, 100); -INSERT INTO table_int VALUES(100, 555); - --- Compare bucketing results -SELECT time_bucket('10', time), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_int; -CALL refresh_continuous_aggregate('cagg_int', NULL, NULL); -SELECT * FROM cagg_int; - -SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1; -SELECT * FROM cagg_int_offset; -- the value 100 is part of the already serialized bucket, so it should not be visible -CALL refresh_continuous_aggregate('cagg_int_offset', NULL, NULL); -SELECT * FROM cagg_int_offset; - --- Ensure everything was materialized -ALTER MATERIALIZED VIEW cagg_int SET (timescaledb.materialized_only=true); -ALTER MATERIALIZED VIEW cagg_int_offset SET (timescaledb.materialized_only=true); - -SELECT * FROM cagg_int; -SELECT * FROM cagg_int_offset; - --- Check that the refresh is properly aligned -INSERT INTO table_int VALUES(114, 0); - -SET client_min_messages TO DEBUG1; -CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130); -RESET client_min_messages; - -SELECT * FROM cagg_int_offset; -SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1; - --- Variable sized buckets with origin -CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 year', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin'; -DROP MATERIALIZED VIEW 
cagg_1_hour_variable_bucket_fixed_origin; - --- Variable due to the used timezone -CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2 - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 hour', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin2'; -DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2; - --- Variable with offset -CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3 - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 year', time, "offset"=>'5 minutes'::interval) AS hour_bucket, max(value) AS max_value - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin3'; -DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3; - ---- --- Test with blocking a few broken configurations ---- -\set ON_ERROR_STOP 0 - --- Unfortunately '\set VERBOSITY verbose' cannot be used here to check the error details --- since it also prints the line number of the location, which is depended on the build - --- Different time origin -CREATE MATERIALIZED VIEW cagg_1_hour_origin - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value - FROM temperature - GROUP BY 1 ORDER BY 1; - -CREATE MATERIALIZED VIEW cagg_1_week_origin - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 week', hour_bucket, origin=>'2022-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value - FROM cagg_1_hour_origin - GROUP BY 1 ORDER BY 1; - --- Different time offset -CREATE MATERIALIZED VIEW cagg_1_hour_offset - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 hour', time, "offset"=>'30m'::interval) AS hour_bucket, max(value) AS max_value - FROM temperature - GROUP BY 1 ORDER BY 1; - -CREATE MATERIALIZED VIEW cagg_1_week_offset - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 week', hour_bucket, "offset"=>'35m'::interval) AS week_bucket, max(max_value) AS max_value - FROM cagg_1_hour_offset - GROUP BY 1 ORDER BY 1; - --- Different integer offset -CREATE MATERIALIZED VIEW cagg_int_offset_5 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS SELECT time_bucket('10', time, "offset"=>5) AS time, SUM(data) AS value - FROM table_int - GROUP BY 1 ORDER BY 1; - -CREATE MATERIALIZED VIEW cagg_int_offset_10 - WITH (timescaledb.continuous, timescaledb.materialized_only=false) - AS SELECT time_bucket('10', time, "offset"=>10) AS time, SUM(value) AS value - FROM cagg_int_offset_5 - GROUP BY 1 ORDER BY 1; - -\set ON_ERROR_STOP 1 - -DROP MATERIALIZED VIEW cagg_1_hour_origin; -DROP MATERIALIZED VIEW cagg_1_hour_offset; -DROP MATERIALIZED VIEW cagg_int_offset_5; - ---- --- CAGGs on CAGGs tests ---- -CREATE MATERIALIZED VIEW cagg_1_hour_offset - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value - FROM temperature - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_offset'; - -CREATE MATERIALIZED VIEW cagg_1_week_offset - WITH (timescaledb.continuous) AS - SELECT time_bucket('1 week', hour_bucket, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value - FROM 
cagg_1_hour_offset - GROUP BY 1 ORDER BY 1; -SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_week_offset'; - --- Compare output -SELECT * FROM cagg_1_week_offset; -SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -INSERT INTO temperature values('2030-01-01 05:05:00 PST', 22222); -INSERT INTO temperature values('2030-01-03 05:05:00 PST', 55555); - --- Compare real-time functionality -ALTER MATERIALIZED VIEW cagg_1_hour_offset SET (timescaledb.materialized_only=false); -ALTER MATERIALIZED VIEW cagg_1_week_offset SET (timescaledb.materialized_only=false); - -SELECT * FROM cagg_1_week_offset; -SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - --- Test refresh -CALL refresh_continuous_aggregate('cagg_1_hour_offset', NULL, NULL); -CALL refresh_continuous_aggregate('cagg_1_week_offset', NULL, NULL); - --- Everything should be now materailized -ALTER MATERIALIZED VIEW cagg_1_hour_offset SET (timescaledb.materialized_only=false); -ALTER MATERIALIZED VIEW cagg_1_week_offset SET (timescaledb.materialized_only=false); - -SELECT * FROM cagg_1_week_offset; -SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -TRUNCATE temperature; - -SELECT * FROM cagg_1_week_offset; -SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1; - -DROP VIEW caggs_info; +\set TEST_BASE_NAME cagg_query +\ir include/cagg_query_common.sql diff --git a/tsl/test/sql/cagg_query_using_merge.sql b/tsl/test/sql/cagg_query_using_merge.sql new file mode 100644 index 00000000000..5b0153cfb60 --- /dev/null +++ b/tsl/test/sql/cagg_query_using_merge.sql @@ -0,0 +1,15 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- Connect as superuser to use SET ROLE later +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER + +-- Run tests with default role +SET ROLE :ROLE_DEFAULT_PERM_USER; + +-- Enable MERGE statements for continuous aggregate refresh +SET timescaledb.enable_merge_on_cagg_refresh TO ON; + +\set TEST_BASE_NAME cagg_query_using_merge +\ir include/cagg_query_common.sql diff --git a/tsl/test/sql/cagg_refresh.sql b/tsl/test/sql/cagg_refresh.sql index ec8c0b92f0e..645cdb25cc1 100644 --- a/tsl/test/sql/cagg_refresh.sql +++ b/tsl/test/sql/cagg_refresh.sql @@ -2,312 +2,4 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. 
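For context on what the new *_using_merge.sql wrappers (above and below) exercise: with timescaledb.enable_merge_on_cagg_refresh set to ON, a refresh merges the freshly aggregated rows into the materialization hypertable using PostgreSQL's MERGE statement (available since PostgreSQL 15), rather than deleting and re-inserting the affected region. A rough, hypothetical sketch of the statement shape for the daily_temp aggregate defined in the test body that follows; the target table name, refresh window, and update predicate are illustrative assumptions, not the exact SQL the refresh generates.

-- Hypothetical sketch only; names and predicates are illustrative.
MERGE INTO _timescaledb_internal._materialized_hypertable_2 M  -- assumed mat table for daily_temp
USING (
    -- freshly aggregated rows for the refresh window
    SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
    FROM conditions
    WHERE time >= '2020-05-03 00:00 UTC' AND time < '2020-05-04 00:00 UTC'
    GROUP BY 1, 2
) P
ON P.day IS NOT DISTINCT FROM M.day
   AND P.device IS NOT DISTINCT FROM M.device
WHEN MATCHED THEN
    UPDATE SET avg_temp = P.avg_temp
WHEN NOT MATCHED THEN
    INSERT (day, device, avg_temp) VALUES (P.day, P.device, P.avg_temp);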
--- Disable background workers since we are testing manual refresh -\c :TEST_DBNAME :ROLE_SUPERUSER -SELECT _timescaledb_functions.stop_background_workers(); -SET ROLE :ROLE_DEFAULT_PERM_USER; - -CREATE TABLE conditions (time timestamptz NOT NULL, device int, temp float); -SELECT create_hypertable('conditions', 'time'); - -SELECT setseed(.12); - -INSERT INTO conditions -SELECT t, ceil(abs(timestamp_hash(t::timestamp))%4)::int, abs(timestamp_hash(t::timestamp))%40 -FROM generate_series('2020-05-01', '2020-05-05', '10 minutes'::interval) t; - --- Show the most recent data -SELECT * FROM conditions -ORDER BY time DESC, device -LIMIT 10; - -CREATE MATERIALIZED VIEW daily_temp -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH NO DATA; - --- The continuous aggregate should be empty -SELECT * FROM daily_temp -ORDER BY day DESC, device; - --- Refresh one bucket (1 day): -SHOW timezone; --- The refresh of a single bucket must align with the start of the day --- in the bucket's time zone (which is UTC, since time_bucket doesn't --- support time zone arg) -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-04 00:00 UTC'); -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 17:00 PDT', '2020-05-04 17:00 PDT'); - -\set ON_ERROR_STOP 0 -\set VERBOSITY default --- These refreshes will fail since they don't align with the bucket's --- time zone -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-04'); -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 PDT', '2020-05-04 00:00 PDT'); - --- Refresh window less than one bucket -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-03 23:59 UTC'); --- Refresh window bigger than one bucket, but failing since it is not --- aligned with bucket boundaries so that it covers a full bucket: --- --- Refresh window: [----------) --- Buckets: [------|------] -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 01:00 UTC', '2020-05-04 08:00 UTC'); -\set VERBOSITY terse -\set ON_ERROR_STOP 1 - --- Refresh the most recent few days: -CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00'); - -SELECT * FROM daily_temp -ORDER BY day DESC, device; - --- Refresh the rest (and try DEBUG output) -SET client_min_messages TO DEBUG1; -CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04'); -RESET client_min_messages; - --- Compare the aggregate to the equivalent query on the source table -SELECT * FROM daily_temp -ORDER BY day DESC, device; - -SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 -ORDER BY 1 DESC,2; - --- Test unusual, but valid input -CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::timestamptz, '2020-05-03'::date); -CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::date, '2020-05-03'::date); - --- Unbounded window forward in time -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', NULL); -CALL refresh_continuous_aggregate('daily_temp', NULL, NULL); - --- Unbounded window back in time -CALL refresh_continuous_aggregate('daily_temp', NULL, '2020-05-01'); - --- Test bad input -\set ON_ERROR_STOP 0 --- Bad continuous aggregate name -CALL refresh_continuous_aggregate(NULL, '2020-05-03', '2020-05-05'); -CALL refresh_continuous_aggregate('xyz', '2020-05-03', '2020-05-05'); --- Valid object, but not a 
continuous aggregate -CALL refresh_continuous_aggregate('conditions', '2020-05-03', '2020-05-05'); --- Object ID with no object -CALL refresh_continuous_aggregate(1, '2020-05-03', '2020-05-05'); --- Lacking arguments -CALL refresh_continuous_aggregate('daily_temp'); -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03'); --- Bad time ranges -CALL refresh_continuous_aggregate('daily_temp', 'xyz', '2020-05-05'); -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', 'xyz'); -CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-01'); --- Bad time input -CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::text, '2020-05-03'::text); -CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01'); -\set ON_ERROR_STOP 1 - --- Test different time types -CREATE TABLE conditions_date (time date NOT NULL, device int, temp float); -SELECT create_hypertable('conditions_date', 'time'); - -CREATE MATERIALIZED VIEW daily_temp_date -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp -FROM conditions_date -GROUP BY 1,2 WITH NO DATA; - -CALL refresh_continuous_aggregate('daily_temp_date', '2020-05-01', '2020-05-03'); - --- Try max refresh window size -CALL refresh_continuous_aggregate('daily_temp_date', NULL, NULL); - --- Test smallint-based continuous aggregate -CREATE TABLE conditions_smallint (time smallint NOT NULL, device int, temp float); -SELECT create_hypertable('conditions_smallint', 'time', chunk_time_interval => 20); - -INSERT INTO conditions_smallint -SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::smallint, abs(timestamp_hash(to_timestamp(t)::timestamp))%40 -FROM generate_series(1, 100, 1) t; - -CREATE OR REPLACE FUNCTION smallint_now() -RETURNS smallint LANGUAGE SQL STABLE AS -$$ - SELECT coalesce(max(time), 0)::smallint - FROM conditions_smallint -$$; - -\set ON_ERROR_STOP 0 --- First try to create an integer-based continuous aggregate without --- an now function. This should not be allowed. 
-CREATE MATERIALIZED VIEW cond_20_smallint -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp -FROM conditions_smallint c -GROUP BY 1,2 WITH NO DATA; -\set ON_ERROR_STOP 1 - -SELECT set_integer_now_func('conditions_smallint', 'smallint_now'); - -CREATE MATERIALIZED VIEW cond_20_smallint -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp -FROM conditions_smallint c -GROUP BY 1,2 WITH NO DATA; - -CALL refresh_continuous_aggregate('cond_20_smallint', 0::smallint, 70::smallint); - -SELECT * FROM cond_20_smallint -ORDER BY 1,2; - --- Try max refresh window size -CALL refresh_continuous_aggregate('cond_20_smallint', NULL, NULL); - --- Test int-based continuous aggregate -CREATE TABLE conditions_int (time int NOT NULL, device int, temp float); -SELECT create_hypertable('conditions_int', 'time', chunk_time_interval => 20); - -INSERT INTO conditions_int -SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::int, abs(timestamp_hash(to_timestamp(t)::timestamp))%40 -FROM generate_series(1, 100, 1) t; - -CREATE OR REPLACE FUNCTION int_now() -RETURNS int LANGUAGE SQL STABLE AS -$$ - SELECT coalesce(max(time), 0) - FROM conditions_int -$$; - -SELECT set_integer_now_func('conditions_int', 'int_now'); - -CREATE MATERIALIZED VIEW cond_20_int -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket(INT '20', time) AS bucket, device, avg(temp) AS avg_temp -FROM conditions_int -GROUP BY 1,2 WITH NO DATA; - -CALL refresh_continuous_aggregate('cond_20_int', 0, 65); - -SELECT * FROM cond_20_int -ORDER BY 1,2; - --- Try max refresh window size -CALL refresh_continuous_aggregate('cond_20_int', NULL, NULL); - --- Test bigint-based continuous aggregate -CREATE TABLE conditions_bigint (time bigint NOT NULL, device int, temp float); -SELECT create_hypertable('conditions_bigint', 'time', chunk_time_interval => 20); - -INSERT INTO conditions_bigint -SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::bigint, abs(timestamp_hash(to_timestamp(t)::timestamp))%40 -FROM generate_series(1, 100, 1) t; - -CREATE OR REPLACE FUNCTION bigint_now() -RETURNS bigint LANGUAGE SQL STABLE AS -$$ - SELECT coalesce(max(time), 0)::bigint - FROM conditions_bigint -$$; - -SELECT set_integer_now_func('conditions_bigint', 'bigint_now'); - -CREATE MATERIALIZED VIEW cond_20_bigint -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket(BIGINT '20', time) AS bucket, device, avg(temp) AS avg_temp -FROM conditions_bigint -GROUP BY 1,2 WITH NO DATA; - -CALL refresh_continuous_aggregate('cond_20_bigint', 0, 75); - -SELECT * FROM cond_20_bigint -ORDER BY 1,2; - --- Try max refresh window size -CALL refresh_continuous_aggregate('cond_20_bigint', NULL, NULL); - --- Test that WITH NO DATA and WITH DATA works (we use whatever is the --- default for Postgres, so we do not need to have test for the --- default). 
- -CREATE MATERIALIZED VIEW weekly_temp_without_data -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH NO DATA; - -CREATE MATERIALIZED VIEW weekly_temp_with_data -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH DATA; - -SELECT * FROM weekly_temp_without_data; -SELECT * FROM weekly_temp_with_data ORDER BY 1,2; - -\set ON_ERROR_STOP 0 --- REFRESH MATERIALIZED VIEW is blocked on continuous aggregates -REFRESH MATERIALIZED VIEW weekly_temp_without_data; - --- These should fail since we do not allow refreshing inside a --- transaction, not even as part of CREATE MATERIALIZED VIEW. -DO LANGUAGE PLPGSQL $$ BEGIN -CREATE MATERIALIZED VIEW weekly_conditions -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH DATA; -END $$; - -BEGIN; -CREATE MATERIALIZED VIEW weekly_conditions -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH DATA; -COMMIT; - -\set ON_ERROR_STOP 1 - --- This should not fail since we do not refresh the continuous --- aggregate. -DO LANGUAGE PLPGSQL $$ BEGIN -CREATE MATERIALIZED VIEW weekly_conditions_1 -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH NO DATA; -END $$; - -BEGIN; -CREATE MATERIALIZED VIEW weekly_conditions_2 -WITH (timescaledb.continuous, - timescaledb.materialized_only=true) -AS -SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp -FROM conditions -GROUP BY 1,2 WITH NO DATA; -COMMIT; +\ir include/cagg_refresh_common.sql diff --git a/tsl/test/sql/cagg_refresh_using_merge.sql b/tsl/test/sql/cagg_refresh_using_merge.sql new file mode 100644 index 00000000000..d3310456b5e --- /dev/null +++ b/tsl/test/sql/cagg_refresh_using_merge.sql @@ -0,0 +1,8 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- Enable MERGE statements for continuous aggregate refresh +SET timescaledb.enable_merge_on_cagg_refresh TO ON; + +\ir include/cagg_refresh_common.sql diff --git a/tsl/test/sql/include/cagg_query_common.sql b/tsl/test/sql/include/cagg_query_common.sql new file mode 100644 index 00000000000..2d0d26e48af --- /dev/null +++ b/tsl/test/sql/include/cagg_query_common.sql @@ -0,0 +1,957 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +SELECT + format('%s/results/%s_results_view.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW", + format('%s/results/%s_results_view_hashagg.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_VIEW_HASHAGG", + format('%s/results/%s_results_table.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_TABLE" +\gset +SELECT format('\! diff %s %s', :'TEST_RESULTS_VIEW', :'TEST_RESULTS_TABLE') as "DIFF_CMD", + format('\! 
diff %s %s', :'TEST_RESULTS_VIEW_HASHAGG', :'TEST_RESULTS_TABLE') as "DIFF_CMD2" +\gset + + +\set EXPLAIN 'EXPLAIN (VERBOSE, COSTS OFF)' + +SET client_min_messages TO NOTICE; + +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); + +select table_name from create_hypertable( 'conditions', 'timec'); + +insert into conditions values ( '2018-01-01 09:20:00-08', 'SFO', 55, 45); +insert into conditions values ( '2018-01-02 09:30:00-08', 'por', 100, 100); +insert into conditions values ( '2018-01-02 09:20:00-08', 'SFO', 65, 45); +insert into conditions values ( '2018-01-02 09:10:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-11-01 09:20:00-08', 'NYC', 45, 30); +insert into conditions values ( '2018-11-01 10:40:00-08', 'NYC', 55, 35); +insert into conditions values ( '2018-11-01 11:50:00-08', 'NYC', 65, 40); +insert into conditions values ( '2018-11-01 12:10:00-08', 'NYC', 75, 45); +insert into conditions values ( '2018-11-01 13:10:00-08', 'NYC', 85, 50); +insert into conditions values ( '2018-11-02 09:20:00-08', 'NYC', 10, 10); +insert into conditions values ( '2018-11-02 10:30:00-08', 'NYC', 20, 15); +insert into conditions values ( '2018-11-02 11:40:00-08', 'NYC', null, null); +insert into conditions values ( '2018-11-03 09:50:00-08', 'NYC', null, null); + +create table location_tab( locid integer, locname text ); +insert into location_tab values( 1, 'SFO'); +insert into location_tab values( 2, 'NYC'); +insert into location_tab values( 3, 'por'); + +create materialized view mat_m1( location, timec, minl, sumt , sumh) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by time_bucket('1day', timec), location WITH NO DATA; + +--compute time_bucketed max+bucket_width for the materialized view +SELECT time_bucket('1day' , q.timeval+ '1day'::interval) +FROM ( select max(timec)as timeval from conditions ) as q; +CALL refresh_continuous_aggregate('mat_m1', NULL, NULL); + +--test first/last +create materialized view mat_m2(location, timec, firsth, lasth, maxtemp, mintemp) +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, time_bucket('1day', timec), first(humidity, timec), last(humidity, timec), max(temperature), min(temperature) +from conditions +group by time_bucket('1day', timec), location WITH NO DATA; +--time that refresh assumes as now() for repeatability +SELECT time_bucket('1day' , q.timeval+ '1day'::interval) +FROM ( select max(timec)as timeval from conditions ) as q; +CALL refresh_continuous_aggregate('mat_m2', NULL, NULL); + +--normal view -- +create or replace view regview( location, timec, minl, sumt , sumh) +as +select location, time_bucket('1day', timec), min(location), sum(temperature),sum(humidity) +from conditions +group by location, time_bucket('1day', timec); + +set enable_hashagg = false; + +-- NO pushdown cases --- +--when we have additional 
attributes in order by that are not in the +-- group by, we will still need a sort +:EXPLAIN +select * from mat_m1 order by sumh, sumt, minl, timec ; +:EXPLAIN +select * from regview order by timec desc; + +-- PUSHDOWN cases -- +-- all group by elements in order by, reorder group by elements to match +-- the group by order +-- This should prevent an additional sort after GroupAggregate +:EXPLAIN +select * from mat_m1 order by timec desc, location; + +:EXPLAIN +select * from mat_m1 order by location, timec desc; + +:EXPLAIN +select * from mat_m1 order by location, timec asc; +:EXPLAIN +select * from mat_m1 where timec > '2018-10-01' order by timec desc; +-- The outer sort is used by mat_m1 for grouping, but it doesn't avoid a sort after the join --- +:EXPLAIN +select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc; + +:EXPLAIN +select * from mat_m2 where timec > '2018-10-01' order by timec desc; + +:EXPLAIN +select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc ) as q limit 1; + +:EXPLAIN +select * from (select * from mat_m2 where timec > '2018-10-01' order by timec desc , location asc nulls first) as q limit 1; + +--plans with CTE +:EXPLAIN +with m1 as ( +Select * from mat_m2 where timec > '2018-10-01' order by timec desc ) +select * from m1; + +-- should reorder mat_m1 group by only based on mat_m1 order-by +:EXPLAIN +select * from mat_m1, mat_m2 where mat_m1.timec > '2018-10-01' and mat_m1.timec = mat_m2.timec order by mat_m1.timec desc; +--should reorder only for mat_m1. +:EXPLAIN +select * from mat_m1, regview where mat_m1.timec > '2018-10-01' and mat_m1.timec = regview.timec order by mat_m1.timec desc; + +select l.locid, mat_m1.* from mat_m1 , location_tab l where timec > '2018-10-01' and l.locname = mat_m1.location order by timec desc; + +\set ECHO none +SET client_min_messages TO error; +\o :TEST_RESULTS_VIEW +select * from mat_m1 order by timec desc, location; +select * from mat_m1 order by location, timec desc; +select * from mat_m1 order by location, timec asc; +select * from mat_m1 where timec > '2018-10-01' order by timec desc; +select * from mat_m2 where timec > '2018-10-01' order by timec desc; +\o +RESET client_min_messages; +\set ECHO all + +---- Run the same queries with hash agg enabled now +set enable_hashagg = true; +\set ECHO none +SET client_min_messages TO error; +\o :TEST_RESULTS_VIEW_HASHAGG +select * from mat_m1 order by timec desc, location; +select * from mat_m1 order by location, timec desc; +select * from mat_m1 order by location, timec asc; +select * from mat_m1 where timec > '2018-10-01' order by timec desc; +select * from mat_m2 where timec > '2018-10-01' order by timec desc; +\o +RESET client_min_messages; +\set ECHO all + +--- Run the queries directly on the table now +set enable_hashagg = true; +\set ECHO none +SET client_min_messages TO error; +\o :TEST_RESULTS_TABLE +SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions group by time_bucket('1day', timec) , location +order by timec desc, location; +SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions group by time_bucket('1day', timec) , location +order by location, timec desc; +SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions group by time_bucket('1day', 
timec) , location +order by location, timec asc; +select * from (SELECT location, time_bucket('1day', timec) as timec, min(location) as minl, sum(temperature) as sumt, sum(humidity) as sumh from conditions +group by time_bucket('1day', timec) , location ) as q +where timec > '2018-10-01' order by timec desc; +--comparison for mat_m2 queries +select * from ( +select location, time_bucket('1day', timec) as timec, first(humidity, timec) firsth, last(humidity, timec) lasth, max(temperature) maxtemp, min(temperature) mintemp +from conditions +group by time_bucket('1day', timec), location) as q +where timec > '2018-10-01' order by timec desc limit 10; +\o +RESET client_min_messages; +\set ECHO all + +-- diff the results of the view select and the table select +:DIFF_CMD +:DIFF_CMD2 + +-- check that the GUC works: with reordering disabled, the group by will not be reordered +set timescaledb.enable_cagg_reorder_groupby = false; +set enable_hashagg = false; +:EXPLAIN +select * from mat_m1 order by timec desc, location; + +----------------------------------------------------------------------- +-- Test the cagg_watermark function. The watermark gives the point +-- where to UNION raw and materialized data in real-time +-- aggregation. Specifically, test that the watermark caching works as +-- expected. +----------------------------------------------------------------------- + +-- Insert some more data so that there is something to UNION in +-- real-time aggregation. + +insert into conditions values ( '2018-12-02 20:10:00-08', 'SFO', 55, 45); +insert into conditions values ( '2018-12-02 21:20:00-08', 'SFO', 65, 45); +insert into conditions values ( '2018-12-02 20:30:00-08', 'NYC', 65, 45); +insert into conditions values ( '2018-12-02 21:50:00-08', 'NYC', 45, 30); + +-- Test join of two caggs. Joining two caggs will force the cache to +-- reset every time the watermark function is invoked on a different +-- cagg in the same query. +SELECT mat_hypertable_id AS mat_id, + raw_hypertable_id AS raw_id, + schema_name AS mat_schema, + table_name AS mat_name, + format('%I.%I', schema_name, table_name) AS mat_table +FROM _timescaledb_catalog.continuous_agg ca, _timescaledb_catalog.hypertable h +WHERE user_view_name='mat_m1' +AND h.id = ca.mat_hypertable_id \gset + +BEGIN; + +-- Query without join +SELECT m1.location, m1.timec, sumt, sumh +FROM mat_m1 m1 +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + +-- Query that joins two caggs. This should force the watermark cache +-- to reset when the materialized hypertable ID changes. A hash join +-- could potentially read all values from mat_m1 then all values from +-- mat_m2. This would be the optimal situation for cagg_watermark +-- caching. We want to avoid it in tests to see that caching doesn't +-- do anything wrong in worse situations (e.g., a nested loop join). 
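To make the watermark's role concrete: a real-time cagg behaves like a UNION ALL of the materialized rows below the watermark and an on-the-fly aggregation of the raw rows at or above it. A simplified sketch for mat_m1, reusing the :mat_id and :mat_table variables set above; the view expansion TimescaleDB actually generates is more involved.

-- Simplified, illustrative sketch of real-time aggregation for mat_m1.
SELECT location, timec, minl, sumt, sumh
FROM :mat_table                          -- materialized part, below the watermark
WHERE timec < _timescaledb_functions.to_timestamp(
                  _timescaledb_functions.cagg_watermark(:mat_id))
UNION ALL
SELECT location, time_bucket('1day', timec), min(location),
       sum(temperature), sum(humidity)  -- raw part, at/above the watermark
FROM conditions
WHERE timec >= _timescaledb_functions.to_timestamp(
                  _timescaledb_functions.cagg_watermark(:mat_id))
GROUP BY time_bucket('1day', timec), location;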
+SET enable_hashjoin=false; + +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + +-- Show the current watermark +SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); + +-- The watermark should, in this case, be the same as the invalidation +-- threshold +SELECT _timescaledb_functions.to_timestamp(watermark) +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :raw_id; + +-- The watermark is the end of materialization (end of last bucket) +-- while the MAX is the start of the last bucket +SELECT max(timec) FROM :mat_table; + +-- Drop the most recent chunk +SELECT chunk_name, range_start, range_end +FROM timescaledb_information.chunks +WHERE hypertable_name = :'mat_name'; + +SELECT drop_chunks('mat_m1', newer_than=>'2018-01-01'::timestamptz); + +SELECT chunk_name, range_start, range_end +FROM timescaledb_information.chunks +WHERE hypertable_name = :'mat_name'; + +-- The watermark should be updated to reflect the dropped data (i.e., +-- the cache should be reset) +SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_id)); + +-- Since we removed the last chunk, the invalidation threshold doesn't +-- move back, while the watermark does. +SELECT _timescaledb_functions.to_timestamp(watermark) +FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold +WHERE hypertable_id = :raw_id; + +-- Compare the new watermark to the MAX time in the table +SELECT max(timec) FROM :mat_table; + +-- Try a subtransaction +SAVEPOINT clear_cagg; + +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C", m1.timec DESC +LIMIT 10; + +ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); + +SELECT m1.location, m1.timec, sumt, sumh, firsth, lasth, maxtemp, mintemp +FROM mat_m1 m1 RIGHT JOIN mat_m2 m2 +ON (m1.location = m2.location +AND m1.timec = m2.timec) +ORDER BY m1.location COLLATE "C" NULLS LAST, m1.timec DESC NULLS LAST, firsth NULLS LAST, + lasth NULLS LAST, mintemp NULLS LAST, maxtemp NULLS LAST +LIMIT 10; + +ROLLBACK; + +----- +-- Tests with time_bucket and offset/origin +----- +CREATE TABLE temperature ( + time timestamptz NOT NULL, + value float +); + +SELECT create_hypertable('temperature', 'time'); + +INSERT INTO temperature VALUES ('2000-01-01 01:00:00'::timestamptz, 5); + +CREATE TABLE temperature_wo_tz ( + time timestamp NOT NULL, + value float +); + +SELECT create_hypertable('temperature_wo_tz', 'time'); + +INSERT INTO temperature_wo_tz VALUES ('2000-01-01 01:00:00'::timestamp, 5); + +CREATE TABLE temperature_date ( + time date NOT NULL, + value float +); + +SELECT create_hypertable('temperature_date', 'time'); + +INSERT INTO temperature_date VALUES ('2000-01-01 01:00:00'::timestamp, 5); + +-- Integer based tables +CREATE TABLE table_smallint ( + time smallint, + data smallint +); + +CREATE TABLE table_int ( + time int, + data int +); + +CREATE TABLE table_bigint ( + time bigint, + data bigint +); + +SELECT create_hypertable('table_smallint', 'time', chunk_time_interval => 10); +SELECT create_hypertable('table_int', 'time', chunk_time_interval => 10); +SELECT create_hypertable('table_bigint', 'time', chunk_time_interval => 10); + +CREATE OR REPLACE 
FUNCTION integer_now_smallint() returns smallint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_smallint $$; +CREATE OR REPLACE FUNCTION integer_now_int() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_int $$; +CREATE OR REPLACE FUNCTION integer_now_bigint() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM table_bigint $$; + +SELECT set_integer_now_func('table_smallint', 'integer_now_smallint'); +SELECT set_integer_now_func('table_int', 'integer_now_int'); +SELECT set_integer_now_func('table_bigint', 'integer_now_bigint'); + +INSERT INTO table_smallint VALUES(1,2); +INSERT INTO table_int VALUES(1,2); +INSERT INTO table_bigint VALUES(1,2); + +CREATE VIEW caggs_info AS +SELECT user_view_schema, user_view_name, bucket_func, bucket_width, bucket_origin, bucket_offset, bucket_timezone, bucket_fixed_width +FROM _timescaledb_catalog.continuous_aggs_bucket_function NATURAL JOIN _timescaledb_catalog.continuous_agg; + +--- +-- Tests with CAgg creation +--- +CREATE MATERIALIZED VIEW cagg_4_hours + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours'; +DROP MATERIALIZED VIEW cagg_4_hours; + +CREATE MATERIALIZED VIEW cagg_4_hours_offset + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '30m'::interval), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset'; +DROP MATERIALIZED VIEW cagg_4_hours_offset; + +CREATE MATERIALIZED VIEW cagg_4_hours_offset2 + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset2'; +DROP MATERIALIZED VIEW cagg_4_hours_offset2; + +-- Variable buckets (timezone is provided) with offset +CREATE MATERIALIZED VIEW cagg_4_hours_offset_ts + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, timezone=>'UTC'), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_ts'; +DROP MATERIALIZED VIEW cagg_4_hours_offset_ts; + +CREATE MATERIALIZED VIEW cagg_4_hours_origin + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin'; +DROP MATERIALIZED VIEW cagg_4_hours_origin; + +-- Using named parameter +CREATE MATERIALIZED VIEW cagg_4_hours_origin2 + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value) + FROM temperature + GROUP BY 1 ORDER BY 1; +SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin2'; +DROP MATERIALIZED VIEW cagg_4_hours_origin2; + +-- Variable buckets (timezone is provided) with origin +CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts + WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz, 
+
+CREATE VIEW caggs_info AS
+SELECT user_view_schema, user_view_name, bucket_func, bucket_width, bucket_origin, bucket_offset, bucket_timezone, bucket_fixed_width
+FROM _timescaledb_catalog.continuous_aggs_bucket_function NATURAL JOIN _timescaledb_catalog.continuous_agg;
+
+---
+-- Tests with CAgg creation
+---
+CREATE MATERIALIZED VIEW cagg_4_hours
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours';
+DROP MATERIALIZED VIEW cagg_4_hours;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, '30m'::interval), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset';
+DROP MATERIALIZED VIEW cagg_4_hours_offset;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_offset2
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset2';
+DROP MATERIALIZED VIEW cagg_4_hours_offset2;
+
+-- Variable buckets (timezone is provided) with offset
+CREATE MATERIALIZED VIEW cagg_4_hours_offset_ts
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, timezone=>'UTC'), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_ts';
+DROP MATERIALIZED VIEW cagg_4_hours_offset_ts;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_origin
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin';
+DROP MATERIALIZED VIEW cagg_4_hours_origin;
+
+-- Using named parameter
+CREATE MATERIALIZED VIEW cagg_4_hours_origin2
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin2';
+DROP MATERIALIZED VIEW cagg_4_hours_origin2;
+
+-- Variable buckets (timezone is provided) with origin
+CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts';
+DROP MATERIALIZED VIEW cagg_4_hours_origin_ts;
+
+-- Without named parameter
+CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts2
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, 'UTC', '2000-01-01 01:00:00 PST'::timestamptz), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts2';
+DROP MATERIALIZED VIEW cagg_4_hours_origin_ts2;
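
-- For illustration only (not part of the original test): an "offset" shifts
-- the default bucket boundaries, while an "origin" re-anchors them at a given
-- timestamp. Assuming a UTC session timezone:
SELECT time_bucket('4 hour', '2000-01-01 01:00:00+00'::timestamptz);                   -- 2000-01-01 00:00:00+00
SELECT time_bucket('4 hour', '2000-01-01 01:00:00+00'::timestamptz, '30m'::interval);  -- 2000-01-01 00:30:00+00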
+
+-- Timestamp based CAggs
+CREATE MATERIALIZED VIEW cagg_4_hours_wo_tz
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time), max(value)
+  FROM temperature_wo_tz
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_wo_tz';
+
+CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00'::timestamp), max(value)
+  FROM temperature_wo_tz
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz';
+DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz;
+
+-- Using named parameter for origin
+CREATE MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, origin=>'2000-01-01 01:00:00'::timestamp), max(value)
+  FROM temperature_wo_tz
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_origin_ts_wo_tz2';
+DROP MATERIALIZED VIEW cagg_4_hours_origin_ts_wo_tz2;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_offset_wo_tz
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval), max(value)
+  FROM temperature_wo_tz
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_offset_wo_tz';
+DROP MATERIALIZED VIEW cagg_4_hours_offset_wo_tz;
+DROP MATERIALIZED VIEW cagg_4_hours_wo_tz;
+
+-- Date based CAggs
+CREATE MATERIALIZED VIEW cagg_4_hours_date
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 days', time), max(value)
+  FROM temperature_date
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date';
+DROP MATERIALIZED VIEW cagg_4_hours_date;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_date_origin
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 days', time, '2000-01-01'::date), max(value)
+  FROM temperature_date
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin';
+DROP MATERIALIZED VIEW cagg_4_hours_date_origin;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_date_origin2
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 days', time, origin=>'2000-01-01'::date), max(value)
+  FROM temperature_date
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_origin2';
+DROP MATERIALIZED VIEW cagg_4_hours_date_origin2;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_date_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 days', time, "offset"=>'30m'::interval), max(value)
+  FROM temperature_date
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_4_hours_date_offset';
+DROP MATERIALIZED VIEW cagg_4_hours_date_offset;
+
+-- Integer based CAggs
+CREATE MATERIALIZED VIEW cagg_smallint
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time), SUM(data) as value
+  FROM table_smallint
+  GROUP BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint';
+DROP MATERIALIZED VIEW cagg_smallint;
+
+CREATE MATERIALIZED VIEW cagg_smallint_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time, "offset"=>1::smallint), SUM(data) as value
+  FROM table_smallint
+  GROUP BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_smallint_offset';
+DROP MATERIALIZED VIEW cagg_smallint_offset;
+
+CREATE MATERIALIZED VIEW cagg_int
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time), SUM(data) as value
+  FROM table_int
+  GROUP BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int';
+DROP MATERIALIZED VIEW cagg_int;
+
+CREATE MATERIALIZED VIEW cagg_int_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time, "offset"=>1::int), SUM(data) as value
+  FROM table_int
+  GROUP BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_int_offset';
+DROP MATERIALIZED VIEW cagg_int_offset;
+
+CREATE MATERIALIZED VIEW cagg_bigint
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time), SUM(data) as value
+  FROM table_bigint
+  GROUP BY 1 WITH NO DATA;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint';
+DROP MATERIALIZED VIEW cagg_bigint;
+
+CREATE MATERIALIZED VIEW cagg_bigint_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time, "offset"=>1::bigint), SUM(data) as value
+  FROM table_bigint
+  GROUP BY 1 WITH NO DATA;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset';
+DROP MATERIALIZED VIEW cagg_bigint_offset;
+
+-- Without named parameter
+CREATE MATERIALIZED VIEW cagg_bigint_offset2
+  WITH (timescaledb.continuous, timescaledb.materialized_only=true)
+  AS SELECT time_bucket('2', time, 1::bigint), SUM(data) as value
+  FROM table_bigint
+  GROUP BY 1 WITH NO DATA;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_bigint_offset2';
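
-- For illustration only (not part of the original test): with an integer
-- bucket width of 2 and an offset of 1, the bucket boundaries fall on the
-- odd numbers ..., -1, 1, 3, 5, ...:
SELECT time_bucket(2, 4, "offset" => 1);  -- expected: 3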
+
+-- Mess with the bucket_func signature to make sure it will raise an exception
+SET ROLE :ROLE_CLUSTER_SUPERUSER;
+\set ON_ERROR_STOP 0
+BEGIN;
+UPDATE _timescaledb_catalog.continuous_aggs_bucket_function SET bucket_func = 'func_does_not_exist()';
+-- should error because function does not exist
+CALL refresh_continuous_aggregate('cagg_bigint_offset2', NULL, NULL);
+ROLLBACK;
+\set ON_ERROR_STOP 1
+SET ROLE :ROLE_DEFAULT_PERM_USER;
+
+DROP MATERIALIZED VIEW cagg_bigint_offset2;
+
+-- Test invalid bucket definitions
+\set ON_ERROR_STOP 0
+-- Offset and origin at the same time is not allowed (the function overload does not exist)
+CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+
+-- Offset and origin at the same time is not allowed (the function exists, but
+-- the parameter combination is invalid)
+CREATE MATERIALIZED VIEW cagg_4_hours_offset_and_origin
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, "offset"=>'30m'::interval, origin=>'2000-01-01 01:00:00 PST'::timestamptz, timezone=>'UTC'), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+\set ON_ERROR_STOP 1
+
+---
+-- Tests with CAgg processing
+---
+
+-- Check the timezone in use
+SHOW timezone;
+
+-- Populate the table
+INSERT INTO temperature
+  SELECT time, 5
+  FROM generate_series('2000-01-01 01:00:00 PST'::timestamptz,
+                       '2000-01-01 23:59:59 PST','1m') time;
+
+INSERT INTO temperature
+  SELECT time, 6
+  FROM generate_series('2020-01-01 00:00:00 PST'::timestamptz,
+                       '2020-01-01 23:59:59 PST','1m') time;
+
+-- Create CAggs
+CREATE MATERIALIZED VIEW cagg_4_hours
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+
+CREATE MATERIALIZED VIEW cagg_4_hours_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, '30m'::interval), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+
+-- Align origin with first value
+CREATE MATERIALIZED VIEW cagg_4_hours_origin
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
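
-- For illustration only (not part of the original test; values computed by
-- hand, displayed here for a PST session timezone): the same input timestamp
-- lands in three differently aligned buckets:
SELECT time_bucket('4 hour', '2020-01-01 06:00:00 PST'::timestamptz);                                          -- 04:00 PST
SELECT time_bucket('4 hour', '2020-01-01 06:00:00 PST'::timestamptz, '30m'::interval);                         -- 04:30 PST
SELECT time_bucket('4 hour', '2020-01-01 06:00:00 PST'::timestamptz, '2000-01-01 01:00:00 PST'::timestamptz);  -- 05:00 PST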
+
+-- Query the CAggs and check that all buckets are materialized
+SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_4_hours;
+ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours;
+
+SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_4_hours_offset;
+ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours_offset;
+
+SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_4_hours_origin;
+ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours_origin;
+
+-- Update the last bucket and re-materialize
+INSERT INTO temperature values('2020-01-01 23:55:00 PST', 10);
+
+CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL);
+
+SELECT * FROM cagg_4_hours;
+SELECT * FROM cagg_4_hours_offset;
+SELECT * FROM cagg_4_hours_origin;
+
+-- Check the real-time functionality
+ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=false);
+
+-- Check watermarks
+SELECT continuous_agg.user_view_name, continuous_aggs_watermark.watermark, _timescaledb_functions.to_timestamp(watermark)
+  FROM _timescaledb_catalog.continuous_aggs_watermark
+  JOIN _timescaledb_catalog.continuous_agg USING (mat_hypertable_id)
+WHERE user_view_name LIKE 'cagg_4_hours%'
+ORDER BY mat_hypertable_id, watermark;
+
+-- Insert new data
+INSERT INTO temperature values('2020-01-02 00:10:00 PST', 2222);
+INSERT INTO temperature values('2020-01-02 05:35:00 PST', 5555);
+INSERT INTO temperature values('2020-01-02 09:05:00 PST', 8888);
+
+-- Watermark is at Thu Jan 02 00:00:00 2020 PST - all inserted tuples should be seen
+SELECT * FROM cagg_4_hours;
+
+-- Watermark is at Thu Jan 02 00:30:00 2020 PST - only two inserted tuples should be seen
+SELECT * FROM cagg_4_hours_offset;
+
+-- Watermark is at Thu Jan 02 01:00:00 2020 PST - only two inserted tuples should be seen
+SELECT * FROM cagg_4_hours_origin;
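
-- Illustrative sketch (not part of the original test): with
-- materialized_only=false, buckets at or above the watermark are computed on
-- the fly from the raw hypertable, roughly equivalent to appending:
SELECT time_bucket('4 hour', time), max(value)
FROM temperature
WHERE time >= '2020-01-02 00:00:00 PST'::timestamptz  -- the watermark of cagg_4_hours (see comment above)
GROUP BY 1 ORDER BY 1;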
+
+-- Update materialized data
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL);
+RESET client_min_messages;
+
+-- Query the CAggs and check that all buckets are materialized
+SELECT * FROM cagg_4_hours;
+ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours;
+SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+SELECT * FROM cagg_4_hours_offset;
+ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours_offset;
+SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+SELECT * FROM cagg_4_hours_origin;
+ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours_origin;
+SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+-- Test invalidations
+TRUNCATE temperature;
+CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL);
+
+INSERT INTO temperature
+  SELECT time, 5
+  FROM generate_series('2000-01-01 01:00:00 PST'::timestamptz,
+                       '2000-01-01 23:59:59 PST','1m') time;
+
+INSERT INTO temperature
+  SELECT time, 6
+  FROM generate_series('2020-01-01 00:00:00 PST'::timestamptz,
+                       '2020-01-01 23:59:59 PST','1m') time;
+
+INSERT INTO temperature values('2020-01-02 01:05:00+01', 2222);
+INSERT INTO temperature values('2020-01-02 01:35:00+01', 5555);
+INSERT INTO temperature values('2020-01-02 05:05:00+01', 8888);
+
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('cagg_4_hours', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_offset', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_4_hours_origin', NULL, NULL);
+RESET client_min_messages;
+
+ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours;
+ALTER MATERIALIZED VIEW cagg_4_hours SET (timescaledb.materialized_only=false);
+SELECT * FROM cagg_4_hours;
+SELECT time_bucket('4 hour', time), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours_offset;
+ALTER MATERIALIZED VIEW cagg_4_hours_offset SET (timescaledb.materialized_only=false);
+SELECT * FROM cagg_4_hours_offset;
+SELECT time_bucket('4 hour', time, '30m'::interval), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=true);
+SELECT * FROM cagg_4_hours_origin;
+ALTER MATERIALIZED VIEW cagg_4_hours_origin SET (timescaledb.materialized_only=false);
+SELECT * FROM cagg_4_hours_origin;
+SELECT time_bucket('4 hour', time, '2000-01-01 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+--- Test with variable-width buckets (use February, since hourly origins are not supported with variable-sized buckets)
+TRUNCATE temperature;
+INSERT INTO temperature
+  SELECT time, 5
+  FROM generate_series('2000-02-01 01:00:00 PST'::timestamptz,
+                       '2000-02-01 23:59:59 PST','1m') time;
+
+INSERT INTO temperature
+  SELECT time, 6
+  FROM generate_series('2020-02-01 01:00:00 PST'::timestamptz,
+                       '2020-02-01 23:59:59 PST','1m') time;
+
+SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3;
+
+CREATE MATERIALIZED VIEW cagg_1_year
+  WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+  SELECT time_bucket('1 year', time), max(value)
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+
+SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log ORDER BY 1, 2, 3;
+
+---
+-- Tests with integer based hypertables
+---
+TRUNCATE table_int;
+
+INSERT INTO table_int
+  SELECT time, 5
+  FROM generate_series(-50, 50) time;
+
+CREATE MATERIALIZED VIEW cagg_int
+  WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+  AS SELECT time_bucket('10', time), SUM(data) as value
+  FROM table_int
+  GROUP BY 1 ORDER BY 1;
+
+CREATE MATERIALIZED VIEW cagg_int_offset
+  WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+  AS SELECT time_bucket('10', time, "offset"=>5), SUM(data) as value
+  FROM table_int
+  GROUP BY 1 ORDER BY 1;
+
+-- Compare bucketing results
+SELECT time_bucket('10', time), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_int;
+
+SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_int_offset;
+
+-- Update table
+INSERT INTO table_int VALUES(51, 100);
+INSERT INTO table_int VALUES(100, 555);
+
+-- Compare bucketing results
+SELECT time_bucket('10', time), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_int;
+CALL refresh_continuous_aggregate('cagg_int', NULL, NULL);
+SELECT * FROM cagg_int;
+
+SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
+SELECT * FROM cagg_int_offset; -- the row at time 51 falls into an already materialized bucket, so it should not be visible yet
+CALL refresh_continuous_aggregate('cagg_int_offset', NULL, NULL);
+SELECT * FROM cagg_int_offset;
+
+-- Ensure everything was materialized
+ALTER MATERIALIZED VIEW cagg_int SET (timescaledb.materialized_only=true);
+ALTER MATERIALIZED VIEW cagg_int_offset SET (timescaledb.materialized_only=true);
+
+SELECT * FROM cagg_int;
+SELECT * FROM cagg_int_offset;
+
+-- Check that the refresh is properly aligned
+INSERT INTO table_int VALUES(114, 0);
+
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('cagg_int_offset', 110, 130);
+RESET client_min_messages;
+
+SELECT * FROM cagg_int_offset;
+SELECT time_bucket('10', time, "offset"=>5), SUM(data) FROM table_int GROUP BY 1 ORDER BY 1;
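
-- For illustration only (not part of the original test): the window
-- [110, 130) is not aligned with the offset buckets, whose boundaries are
-- ..., 105, 115, 125, 135, ...; the DEBUG output above shows the refresh
-- being aligned to those boundaries:
SELECT time_bucket(10, 114, "offset" => 5);  -- expected: 105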
+
+-- Variable sized buckets with origin
+CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 year', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin';
+DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin;
+
+-- Variable due to the timezone used
+CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 hour', time, origin=>'2000-01-01 01:05:00 UTC'::timestamptz, timezone=>'UTC') AS hour_bucket, max(value) AS max_value
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin2';
+DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin2;
+
+-- Variable with offset
+CREATE MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 year', time, "offset"=>'5 minutes'::interval) AS hour_bucket, max(value) AS max_value
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_variable_bucket_fixed_origin3';
+DROP MATERIALIZED VIEW cagg_1_hour_variable_bucket_fixed_origin3;
+
+---
+-- Test blocking a few broken configurations
+---
+\set ON_ERROR_STOP 0
+
+-- Unfortunately '\set VERBOSITY verbose' cannot be used here to check the error details
+-- since it also prints the line number of the location, which depends on the build
+
+-- Different time origin
+CREATE MATERIALIZED VIEW cagg_1_hour_origin
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+
+CREATE MATERIALIZED VIEW cagg_1_week_origin
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 week', hour_bucket, origin=>'2022-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value
+  FROM cagg_1_hour_origin
+  GROUP BY 1 ORDER BY 1;
+
+-- Different time offset
+CREATE MATERIALIZED VIEW cagg_1_hour_offset
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 hour', time, "offset"=>'30m'::interval) AS hour_bucket, max(value) AS max_value
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+
+CREATE MATERIALIZED VIEW cagg_1_week_offset
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 week', hour_bucket, "offset"=>'35m'::interval) AS week_bucket, max(max_value) AS max_value
+  FROM cagg_1_hour_offset
+  GROUP BY 1 ORDER BY 1;
+
+-- Different integer offset
+CREATE MATERIALIZED VIEW cagg_int_offset_5
+  WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+  AS SELECT time_bucket('10', time, "offset"=>5) AS time, SUM(data) AS value
+  FROM table_int
+  GROUP BY 1 ORDER BY 1;
+
+CREATE MATERIALIZED VIEW cagg_int_offset_10
+  WITH (timescaledb.continuous, timescaledb.materialized_only=false)
+  AS SELECT time_bucket('10', time, "offset"=>10) AS time, SUM(value) AS value
+  FROM cagg_int_offset_5
+  GROUP BY 1 ORDER BY 1;
+
+\set ON_ERROR_STOP 1
+
+DROP MATERIALIZED VIEW cagg_1_hour_origin;
+DROP MATERIALIZED VIEW cagg_1_hour_offset;
+DROP MATERIALIZED VIEW cagg_int_offset_5;
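
-- For illustration only (not part of the original test; assuming a UTC
-- session timezone): hierarchical CAggs require the coarser buckets to fully
-- contain the finer ones. With an hourly offset of 30m and a weekly offset of
-- 35m, the weekly boundary (Monday 00:35) falls inside the hourly bucket
-- [00:30, 01:30), so the buckets cannot nest:
SELECT time_bucket('1 hour', '2000-01-03 00:40:00+00'::timestamptz, "offset" => '30m'::interval);  -- 2000-01-03 00:30:00+00
SELECT time_bucket('1 week', '2000-01-03 00:40:00+00'::timestamptz, "offset" => '35m'::interval);  -- 2000-01-03 00:35:00+00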
+
+---
+-- CAGGs on CAGGs tests
+---
+CREATE MATERIALIZED VIEW cagg_1_hour_offset
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 hour', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS hour_bucket, max(value) AS max_value
+  FROM temperature
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_hour_offset';
+
+CREATE MATERIALIZED VIEW cagg_1_week_offset
+  WITH (timescaledb.continuous) AS
+  SELECT time_bucket('1 week', hour_bucket, origin=>'2000-01-02 01:00:00 PST'::timestamptz) AS week_bucket, max(max_value) AS max_value
+  FROM cagg_1_hour_offset
+  GROUP BY 1 ORDER BY 1;
+SELECT * FROM caggs_info WHERE user_view_name = 'cagg_1_week_offset';
+
+-- Compare output
+SELECT * FROM cagg_1_week_offset;
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+INSERT INTO temperature values('2030-01-01 05:05:00 PST', 22222);
+INSERT INTO temperature values('2030-01-03 05:05:00 PST', 55555);
+
+-- Compare real-time functionality
+ALTER MATERIALIZED VIEW cagg_1_hour_offset SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_1_week_offset SET (timescaledb.materialized_only=false);
+
+SELECT * FROM cagg_1_week_offset;
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+-- Test refresh
+CALL refresh_continuous_aggregate('cagg_1_hour_offset', NULL, NULL);
+CALL refresh_continuous_aggregate('cagg_1_week_offset', NULL, NULL);
+
+-- Everything should now be materialized
+ALTER MATERIALIZED VIEW cagg_1_hour_offset SET (timescaledb.materialized_only=false);
+ALTER MATERIALIZED VIEW cagg_1_week_offset SET (timescaledb.materialized_only=false);
+
+SELECT * FROM cagg_1_week_offset;
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+TRUNCATE temperature;
+
+SELECT * FROM cagg_1_week_offset;
+SELECT time_bucket('1 week', time, origin=>'2000-01-02 01:00:00 PST'::timestamptz), max(value) FROM temperature GROUP BY 1 ORDER BY 1;
+
+DROP VIEW caggs_info;
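
-- Illustrative sketch (not part of the original test): the rows returned by
-- the CAgg above survive the TRUNCATE because they are still materialized;
-- refreshing the hourly CAgg and then the weekly one built on top of it
-- would process the invalidations and empty both:
CALL refresh_continuous_aggregate('cagg_1_hour_offset', NULL, NULL);
CALL refresh_continuous_aggregate('cagg_1_week_offset', NULL, NULL);
SELECT * FROM cagg_1_week_offset;  -- expected: no rows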

diff --git a/tsl/test/sql/include/cagg_refresh_common.sql b/tsl/test/sql/include/cagg_refresh_common.sql
new file mode 100644
index 00000000000..83400d9a245
--- /dev/null
+++ b/tsl/test/sql/include/cagg_refresh_common.sql
@@ -0,0 +1,308 @@
+-- This file and its contents are licensed under the Timescale License.
+-- Please see the included NOTICE for copyright information and
+-- LICENSE-TIMESCALE for a copy of the license.
+
+CREATE TABLE conditions (time timestamptz NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions', 'time');
+
+SELECT setseed(.12);
+
+INSERT INTO conditions
+SELECT t, ceil(abs(timestamp_hash(t::timestamp))%4)::int, abs(timestamp_hash(t::timestamp))%40
+FROM generate_series('2020-05-01', '2020-05-05', '10 minutes'::interval) t;
+
+-- Show the most recent data
+SELECT * FROM conditions
+ORDER BY time DESC, device
+LIMIT 10;
+
+CREATE MATERIALIZED VIEW daily_temp
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+
+-- The continuous aggregate should be empty
+SELECT * FROM daily_temp
+ORDER BY day DESC, device;
+
+-- Refresh one bucket (1 day):
+SHOW timezone;
+-- The refresh of a single bucket must align with the start of the day
+-- in the bucket's time zone (which is UTC, since time_bucket doesn't
+-- support a time zone argument)
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-04 00:00 UTC');
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 17:00 PDT', '2020-05-04 17:00 PDT');
+
+\set ON_ERROR_STOP 0
+\set VERBOSITY default
+-- These refreshes will fail since they don't align with bucket
+-- boundaries in the bucket's time zone (UTC)
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-04');
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 PDT', '2020-05-04 00:00 PDT');
+
+-- Refresh window less than one bucket
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 00:00 UTC', '2020-05-03 23:59 UTC');
+-- Refresh window bigger than one bucket, but failing since it is not
+-- aligned with bucket boundaries so that it covers a full bucket:
+--
+-- Refresh window:    [----------)
+-- Buckets:        [------|------]
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03 01:00 UTC', '2020-05-04 08:00 UTC');
+\set VERBOSITY terse
+\set ON_ERROR_STOP 1
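
-- For illustration only (not part of the original test): the PDT window above
-- is accepted because 17:00 PDT is exactly midnight UTC of the following day:
SELECT '2020-05-03 17:00 PDT'::timestamptz AT TIME ZONE 'UTC';  -- 2020-05-04 00:00:00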
+
+-- Refresh the most recent few days:
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-02', '2020-05-05 17:00');
+
+SELECT * FROM daily_temp
+ORDER BY day DESC, device;
+
+-- Refresh the rest (and try DEBUG output)
+SET client_min_messages TO DEBUG1;
+CALL refresh_continuous_aggregate('daily_temp', '2020-04-30', '2020-05-04');
+RESET client_min_messages;
+
+-- Compare the aggregate to the equivalent query on the source table
+SELECT * FROM daily_temp
+ORDER BY day DESC, device;
+
+SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2
+ORDER BY 1 DESC,2;
+
+-- Test unusual, but valid input
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::timestamptz, '2020-05-03'::date);
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::date, '2020-05-03'::date);
+
+-- Unbounded window forward in time
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', NULL);
+CALL refresh_continuous_aggregate('daily_temp', NULL, NULL);
+
+-- Unbounded window back in time
+CALL refresh_continuous_aggregate('daily_temp', NULL, '2020-05-01');
+
+-- Test bad input
+\set ON_ERROR_STOP 0
+-- Bad continuous aggregate name
+CALL refresh_continuous_aggregate(NULL, '2020-05-03', '2020-05-05');
+CALL refresh_continuous_aggregate('xyz', '2020-05-03', '2020-05-05');
+-- Valid object, but not a continuous aggregate
+CALL refresh_continuous_aggregate('conditions', '2020-05-03', '2020-05-05');
+-- Object ID with no object
+CALL refresh_continuous_aggregate(1, '2020-05-03', '2020-05-05');
+-- Lacking arguments
+CALL refresh_continuous_aggregate('daily_temp');
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03');
+-- Bad time ranges
+CALL refresh_continuous_aggregate('daily_temp', 'xyz', '2020-05-05');
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', 'xyz');
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-03', '2020-05-01');
+-- Bad time input
+CALL refresh_continuous_aggregate('daily_temp', '2020-05-01'::text, '2020-05-03'::text);
+CALL refresh_continuous_aggregate('daily_temp', 0, '2020-05-01');
+\set ON_ERROR_STOP 1
+
+-- Test different time types
+CREATE TABLE conditions_date (time date NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_date', 'time');
+
+CREATE MATERIALIZED VIEW daily_temp_date
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('1 day', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions_date
+GROUP BY 1,2 WITH NO DATA;
+
+CALL refresh_continuous_aggregate('daily_temp_date', '2020-05-01', '2020-05-03');
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('daily_temp_date', NULL, NULL);
+
+-- Test smallint-based continuous aggregate
+CREATE TABLE conditions_smallint (time smallint NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_smallint', 'time', chunk_time_interval => 20);
+
+INSERT INTO conditions_smallint
+SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::smallint, abs(timestamp_hash(to_timestamp(t)::timestamp))%40
+FROM generate_series(1, 100, 1) t;
+
+CREATE OR REPLACE FUNCTION smallint_now()
+RETURNS smallint LANGUAGE SQL STABLE AS
+$$
+    SELECT coalesce(max(time), 0)::smallint
+    FROM conditions_smallint
+$$;
+
+\set ON_ERROR_STOP 0
+-- First try to create an integer-based continuous aggregate without
+-- an integer now function. This should not be allowed.
+CREATE MATERIALIZED VIEW cond_20_smallint
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_smallint c
+GROUP BY 1,2 WITH NO DATA;
+\set ON_ERROR_STOP 1
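
-- Illustrative sketch (not part of the original test): the integer-now
-- function supplies the "now" value that integer-based CAggs are refreshed
-- against; with the rows inserted above it returns the current maximum time:
SELECT smallint_now();  -- expected: 100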
+
+SELECT set_integer_now_func('conditions_smallint', 'smallint_now');
+
+CREATE MATERIALIZED VIEW cond_20_smallint
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(SMALLINT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_smallint c
+GROUP BY 1,2 WITH NO DATA;
+
+CALL refresh_continuous_aggregate('cond_20_smallint', 0::smallint, 70::smallint);
+
+SELECT * FROM cond_20_smallint
+ORDER BY 1,2;
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('cond_20_smallint', NULL, NULL);
+
+-- Test int-based continuous aggregate
+CREATE TABLE conditions_int (time int NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_int', 'time', chunk_time_interval => 20);
+
+INSERT INTO conditions_int
+SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::int, abs(timestamp_hash(to_timestamp(t)::timestamp))%40
+FROM generate_series(1, 100, 1) t;
+
+CREATE OR REPLACE FUNCTION int_now()
+RETURNS int LANGUAGE SQL STABLE AS
+$$
+    SELECT coalesce(max(time), 0)
+    FROM conditions_int
+$$;
+
+SELECT set_integer_now_func('conditions_int', 'int_now');
+
+CREATE MATERIALIZED VIEW cond_20_int
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(INT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_int
+GROUP BY 1,2 WITH NO DATA;
+
+CALL refresh_continuous_aggregate('cond_20_int', 0, 65);
+
+SELECT * FROM cond_20_int
+ORDER BY 1,2;
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('cond_20_int', NULL, NULL);
+
+-- Test bigint-based continuous aggregate
+CREATE TABLE conditions_bigint (time bigint NOT NULL, device int, temp float);
+SELECT create_hypertable('conditions_bigint', 'time', chunk_time_interval => 20);
+
+INSERT INTO conditions_bigint
+SELECT t, ceil(abs(timestamp_hash(to_timestamp(t)::timestamp))%4)::bigint, abs(timestamp_hash(to_timestamp(t)::timestamp))%40
+FROM generate_series(1, 100, 1) t;
+
+CREATE OR REPLACE FUNCTION bigint_now()
+RETURNS bigint LANGUAGE SQL STABLE AS
+$$
+    SELECT coalesce(max(time), 0)::bigint
+    FROM conditions_bigint
+$$;
+
+SELECT set_integer_now_func('conditions_bigint', 'bigint_now');
+
+CREATE MATERIALIZED VIEW cond_20_bigint
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket(BIGINT '20', time) AS bucket, device, avg(temp) AS avg_temp
+FROM conditions_bigint
+GROUP BY 1,2 WITH NO DATA;
+
+CALL refresh_continuous_aggregate('cond_20_bigint', 0, 75);
+
+SELECT * FROM cond_20_bigint
+ORDER BY 1,2;
+
+-- Try max refresh window size
+CALL refresh_continuous_aggregate('cond_20_bigint', NULL, NULL);
+
+-- Test that WITH NO DATA and WITH DATA works (we use whatever is the
+-- default for Postgres, so we do not need a test for the default).
+
+CREATE MATERIALIZED VIEW weekly_temp_without_data
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+
+CREATE MATERIALIZED VIEW weekly_temp_with_data
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH DATA;
+
+SELECT * FROM weekly_temp_without_data;
+SELECT * FROM weekly_temp_with_data ORDER BY 1,2;
+
+\set ON_ERROR_STOP 0
+-- REFRESH MATERIALIZED VIEW is blocked on continuous aggregates
+REFRESH MATERIALIZED VIEW weekly_temp_without_data;
+
+-- These should fail since we do not allow refreshing inside a
+-- transaction, not even as part of CREATE MATERIALIZED VIEW.
+DO LANGUAGE PLPGSQL $$ BEGIN
+CREATE MATERIALIZED VIEW weekly_conditions
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH DATA;
+END $$;
+
+BEGIN;
+CREATE MATERIALIZED VIEW weekly_conditions
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH DATA;
+COMMIT;
+
+\set ON_ERROR_STOP 1
+
+-- This should not fail since we do not refresh the continuous
+-- aggregate.
+DO LANGUAGE PLPGSQL $$ BEGIN
+CREATE MATERIALIZED VIEW weekly_conditions_1
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+END $$;
+
+BEGIN;
+CREATE MATERIALIZED VIEW weekly_conditions_2
+WITH (timescaledb.continuous,
+      timescaledb.materialized_only=true)
+AS
+SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
+FROM conditions
+GROUP BY 1,2 WITH NO DATA;
+COMMIT;
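
-- Illustrative sketch (not part of the original test; 'weekly_conditions_3'
-- is a hypothetical name): the supported pattern is to create the CAgg
-- WITH NO DATA and then refresh it outside of any explicit transaction block:
CREATE MATERIALIZED VIEW weekly_conditions_3
WITH (timescaledb.continuous,
      timescaledb.materialized_only=true)
AS
SELECT time_bucket('7 days', time) AS day, device, avg(temp) AS avg_temp
FROM conditions
GROUP BY 1,2 WITH NO DATA;

CALL refresh_continuous_aggregate('weekly_conditions_3', NULL, NULL);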