diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.c b/tsl/src/nodes/decompress_chunk/decompress_chunk.c index ac2c7873422..bd97528c5a7 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_chunk.c +++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.c @@ -48,42 +48,35 @@ typedef struct SortInfo List *required_compressed_pathkeys; List *required_eq_classes; bool needs_sequence_num; - bool can_pushdown_sort; /* sort can be pushed below DecompressChunk */ + bool use_compressed_sort; /* sort can be pushed below DecompressChunk */ + bool use_batch_sorted_merge; bool reverse; -} SortInfo; -typedef enum MergeBatchResult -{ - MERGE_NOT_POSSIBLE, - SCAN_FORWARD, - SCAN_BACKWARD -} MergeBatchResult; + List *decompressed_sort_pathkeys; + QualCost decompressed_sort_pathkeys_cost; +} SortInfo; static RangeTblEntry *decompress_chunk_make_rte(Oid compressed_relid, LOCKMODE lockmode, Query *parse); static void create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel, - CompressionInfo *info, SortInfo *sort_info); + const CompressionInfo *compression_info, + const SortInfo *sort_info); -static DecompressChunkPath *decompress_chunk_path_create(PlannerInfo *root, CompressionInfo *info, - int parallel_workers, - Path *compressed_path); +static DecompressChunkPath * +decompress_chunk_path_create(PlannerInfo *root, const CompressionInfo *info, Path *compressed_path); static void decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, const Chunk *chunk, RelOptInfo *chunk_rel, bool needs_sequence_num); -static SortInfo build_sortinfo(const Chunk *chunk, RelOptInfo *chunk_rel, CompressionInfo *info, - List *pathkeys); +static SortInfo build_sortinfo(PlannerInfo *root, const Chunk *chunk, RelOptInfo *chunk_rel, + const CompressionInfo *info, List *pathkeys); -static bool -is_compressed_column(CompressionInfo *info, Oid type) -{ - return type == info->compresseddata_oid; -} +static Bitmapset *find_const_segmentby(RelOptInfo *chunk_rel, const CompressionInfo *info); static EquivalenceClass * -append_ec_for_seqnum(PlannerInfo *root, CompressionInfo *info, SortInfo *sort_info, Var *var, - Oid sortop, bool nulls_first) +append_ec_for_seqnum(PlannerInfo *root, const CompressionInfo *info, const SortInfo *sort_info, + Var *var, Oid sortop, bool nulls_first) { MemoryContext oldcontext = MemoryContextSwitchTo(root->planner_cxt); @@ -151,7 +144,7 @@ append_ec_for_seqnum(PlannerInfo *root, CompressionInfo *info, SortInfo *sort_in } static EquivalenceClass * -append_ec_for_metadata_col(PlannerInfo *root, CompressionInfo *info, Var *var, PathKey *pk) +append_ec_for_metadata_col(PlannerInfo *root, const CompressionInfo *info, Var *var, PathKey *pk) { MemoryContext oldcontext = MemoryContextSwitchTo(root->planner_cxt); EquivalenceMember *em = makeNode(EquivalenceMember); @@ -186,9 +179,9 @@ append_ec_for_metadata_col(PlannerInfo *root, CompressionInfo *info, Var *var, P return ec; } -static void -build_compressed_scan_pathkeys(SortInfo *sort_info, PlannerInfo *root, List *chunk_pathkeys, - CompressionInfo *info) +static List * +build_compressed_scan_pathkeys(const SortInfo *sort_info, PlannerInfo *root, List *chunk_pathkeys, + const CompressionInfo *info) { Var *var; int varattno; @@ -353,7 +346,7 @@ build_compressed_scan_pathkeys(SortInfo *sort_info, PlannerInfo *root, List *chu } } } - sort_info->required_compressed_pathkeys = required_compressed_pathkeys; + return required_compressed_pathkeys; } DecompressChunkPath * @@ -377,8 +370,14 @@ build_compressioninfo(PlannerInfo 
*root, const Hypertable *ht, const Chunk *chun
 	info->chunk_rel = chunk_rel;
 	info->chunk_rte = planner_rt_fetch(chunk_rel->relid, root);
 
-	Oid relid = ts_chunk_get_relid(chunk->fd.compressed_chunk_id, true);
-	info->settings = ts_compression_settings_get(relid);
+	FormData_chunk compressed_fd = ts_chunk_get_formdata(chunk->fd.compressed_chunk_id);
+	info->compressed_reloid = ts_get_relation_relid(NameStr(compressed_fd.schema_name),
+													NameStr(compressed_fd.table_name),
+													/* return_invalid = */ false);
+	info->compression_hypertable_reloid =
+		ts_hypertable_id_to_relid(compressed_fd.hypertable_id, /* return_invalid = */ false);
+
+	info->settings = ts_compression_settings_get(info->compressed_reloid);
 
 	if (chunk_rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
 	{
@@ -417,6 +416,18 @@ build_compressioninfo(PlannerInfo *root, const Hypertable *ht, const Chun
 		get_attnum(info->settings->fd.relid, COMPRESSION_COLUMN_METADATA_SEQUENCE_NUM_NAME) !=
 		InvalidAttrNumber;
 
+	info->chunk_const_segmentby = find_const_segmentby(chunk_rel, info);
+
+	/*
+	 * If the chunk is a member of a hypertable expansion or a UNION, find its
+	 * parent relation ids. We will use them later to filter out some
+	 * parameterized paths.
+	 */
+	if (chunk_rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+	{
+		info->parent_relids = find_childrel_parents(root, chunk_rel);
+	}
+
 	return info;
 }
 
@@ -457,11 +468,65 @@ smoothstep(double x, double start, double end)
 }
 
 /*
- * Calculate the costs for retrieving the decompressed in-order using
- * a binary heap.
+ * If the query 'order by' is a prefix of the compression 'order by' (or equal to it), we can
+ * exploit the ordering of the individual batches to create a totally ordered result without
+ * resorting the tuples. This speeds up all queries that use this ordering (because no sort node
+ * is needed). In particular, queries that use a LIMIT are sped up because only the top elements
+ * of the affected batches need to be decompressed. Without the optimization, the entire batches
+ * are decompressed, sorted, and then the top elements are taken from the result.
+ *
+ * The idea is to do something similar to the MergeAppend node; a BinaryHeap is used
+ * to merge the individual batches, each sorted per segmentby column, into a sorted result. So we
+ * end up with a data flow which looks as follows:
+ *
+ * DecompressChunk
+ *  * Decompress Batch 1
+ *  * Decompress Batch 2
+ *  * Decompress Batch 3
+ * [....]
+ *  * Decompress Batch N
+ *
+ * Using the presorted batches, we are able to open these batches dynamically. If we don't presort
+ * them, we would have to open all batches at the same time. This would be similar to the work that
+ * MergeAppend does, but it is not needed in our case, so we can reduce the size of the heap and
+ * the number of concurrently open batches.
+ *
+ * The algorithm works as follows:
+ *
+ *   (1) A sort node is placed below the decompress scan node and on top of the scan
+ *       on the compressed chunk. This sort node uses the min/max values of the 'order by'
+ *       columns from the metadata of the batch to get them into an order which can be
+ *       used to merge them.
+ *
+ *       [Scan on compressed chunk] -> [Sort on min/max values] -> [Decompress and merge]
+ *
+ *       For example, the batches are sorted on the min value of the 'order by' metadata
+ *       column: [0, 3] [0, 5] [3, 7] [6, 10]
+ *
+ *   (2) The decompress chunk node initializes a binary heap, opens the first batch and
+ *       decompresses the first tuple from the batch. The tuple is put on the heap.
+ *       In addition, the opened batch is marked as the most recent batch (MRB).
+ *
+ *   (3) As soon as a tuple is requested from the heap, the following steps are performed:
+ *       (3a) If the heap is empty, we are done.
+ *       (3b) The top tuple from the heap is taken. It is checked if this tuple is from the
+ *            MRB. If this is the case, the next batch is opened, the first tuple is decompressed,
+ *            placed on the heap and this batch is marked as MRB. This is repeated until the
+ *            top tuple from the heap is not from the MRB. After the top tuple is not from the
+ *            MRB, all batches (and one ahead) which might contain the most recent tuple are
+ *            opened and placed on the heap.
+ *
+ *            In the example above, the first three batches are opened because the first two
+ *            batches might contain tuples with a value of 0.
+ *       (3c) The top element from the heap is removed, the next tuple from the batch is
+ *            decompressed (if present) and placed on the heap.
+ *       (3d) The former top tuple of the heap is returned.
+ *
+ * This function calculates the costs for retrieving the decompressed data in order
+ * using a binary heap.
  */
 static void
-cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
+cost_batch_sorted_merge(PlannerInfo *root, const CompressionInfo *compression_info,
 						DecompressChunkPath *dcpath, Path *compressed_path)
 {
 	Path sort_path; /* dummy for result of cost_sort */
@@ -577,149 +642,6 @@ cost_batch_sorted_merge(PlannerInfo *root, const CompressionInfo *compression_in
 		dcpath->custom_path.path.rows * uncompressed_row_cost;
 }
 
-/*
- * If the query 'order by' is prefix of the compression 'order by' (or equal), we can exploit
- * the ordering of the individual batches to create a total ordered result without resorting
- * the tuples. This speeds up all queries that use this ordering (because no sort node is
- * needed). In particular, queries that use a LIMIT are speed-up because only the top elements
- * of the affected batches needs to be decompressed. Without the optimization, the entire batches
- * are decompressed, sorted, and then the top elements are taken from the result.
- *
- * The idea is to do something similar to the MergeAppend node; a BinaryHeap is used
- * to merge the per segment by column sorted individual batches into a sorted result. So, we end
- * up which a data flow which looks as follows:
- *
- * DecompressChunk
- *  * Decompress Batch 1
- *  * Decompress Batch 2
- *  * Decompress Batch 3
- * [....]
- *  * Decompress Batch N
- *
- * Using the presorted batches, we are able to open these batches dynamically. If we don't presort
- * them, we would have to open all batches at the same time. This would be similar to the work the
- * MergeAppend does, but this is not needed in our case and we could reduce the size of the heap and
- * the amount of parallel open batches.
- *
- * The algorithm works as follows:
- *
- *   (1) A sort node is placed below the decompress scan node and on top of the scan
- *       on the compressed chunk. This sort node uses the min/max values of the 'order by'
- *       columns from the metadata of the batch to get them into an order which can be
- *       used to merge them.
- *
- *       [Scan on compressed chunk] -> [Sort on min/max values] -> [Decompress and merge]
- *
- *       For example, the batches are sorted on the min value of the 'order by' metadata
- *       column: [0, 3] [0, 5] [3, 7] [6, 10]
- *
- *   (2) The decompress chunk node initializes a binary heap, opens the first batch and
- *       decompresses the first tuple from the batch. The tuple is put on the heap.
In addition - * the opened batch is marked as the most recent batch (MRB). - * - * (3) As soon as a tuple is requested from the heap, the following steps are performed: - * (3a) If the heap is empty, we are done. - * (3b) The top tuple from the heap is taken. It is checked if this tuple is from the - * MRB. If this is the case, the next batch is opened, the first tuple is decompressed, - * placed on the heap and this batch is marked as MRB. This is repeated until the - * top tuple from the heap is not from the MRB. After the top tuple is not from the - * MRB, all batches (and one ahead) which might contain the most recent tuple are - * opened and placed on the heap. - * - * In the example above, the first three batches are opened because the first two - * batches might contain tuples with a value of 0. - * (3c) The top element from the heap is removed, the next tuple from the batch is - * decompressed (if present) and placed on the heap. - * (3d) The former top tuple of the heap is returned. - * - * This function checks if the compression 'order by' and the query 'order by' are - * compatible and the optimization can be used. - */ -static MergeBatchResult -can_batch_sorted_merge(PlannerInfo *root, CompressionInfo *info, const Chunk *chunk) -{ - PathKey *pk; - Var *var; - Expr *expr; - char *column_name; - List *pathkeys = root->query_pathkeys; - MergeBatchResult merge_result = SCAN_FORWARD; - - /* Ensure that we have path keys and the chunk is ordered */ - if (pathkeys == NIL || ts_chunk_is_unordered(chunk)) - return MERGE_NOT_POSSIBLE; - - int nkeys = list_length(pathkeys); - - /* - * Loop over the pathkeys of the query. These pathkeys need to match the - * configured compress_orderby pathkeys. - */ - for (int pk_index = 0; pk_index < nkeys; pk_index++) - { - pk = list_nth(pathkeys, pk_index); - expr = find_em_expr_for_rel(pk->pk_eclass, info->chunk_rel); - - if (expr == NULL || !IsA(expr, Var)) - return MERGE_NOT_POSSIBLE; - - var = castNode(Var, expr); - - if (var->varattno <= 0) - return MERGE_NOT_POSSIBLE; - - column_name = get_attname(info->chunk_rte->relid, var->varattno, false); - int16 orderby_index = ts_array_position(info->settings->fd.orderby, column_name); - - if (orderby_index != pk_index + 1) - return MERGE_NOT_POSSIBLE; - - /* Check order, if the order of the first column do not match, switch to backward scan */ - Assert(pk->pk_strategy == BTLessStrategyNumber || - pk->pk_strategy == BTGreaterStrategyNumber); - - bool orderby_desc = - ts_array_get_element_bool(info->settings->fd.orderby_desc, orderby_index); - bool orderby_nullsfirst = - ts_array_get_element_bool(info->settings->fd.orderby_nullsfirst, orderby_index); - - if (pk->pk_strategy != BTLessStrategyNumber) - { - /* Test that ORDER BY and NULLS first/last do match in forward scan */ - if (orderby_desc && orderby_nullsfirst == pk->pk_nulls_first && - merge_result == SCAN_FORWARD) - continue; - /* Exact opposite in backward scan */ - else if (!orderby_desc && orderby_nullsfirst != pk->pk_nulls_first && - merge_result == SCAN_BACKWARD) - continue; - /* Switch scan direction on exact opposite order for first attribute */ - else if (!orderby_desc && orderby_nullsfirst != pk->pk_nulls_first && pk_index == 0) - merge_result = SCAN_BACKWARD; - else - return MERGE_NOT_POSSIBLE; - } - else - { - /* Test that ORDER BY and NULLS first/last do match in forward scan */ - if (!orderby_desc && orderby_nullsfirst == pk->pk_nulls_first && - merge_result == SCAN_FORWARD) - continue; - /* Exact opposite in backward scan */ - else if 
(orderby_desc && orderby_nullsfirst != pk->pk_nulls_first &&
-					 merge_result == SCAN_BACKWARD)
-				continue;
-			/* Switch scan direction on exact opposite order for first attribute */
-			else if (orderby_desc && orderby_nullsfirst != pk->pk_nulls_first && pk_index == 0)
-				merge_result = SCAN_BACKWARD;
-			else
-				return MERGE_NOT_POSSIBLE;
-		}
-	}
-
-	return merge_result;
-}
-
 /*
  * This function adds per-chunk sorted paths for compressed chunks if beneficial. This has two
  * advantages:
@@ -745,86 +667,95 @@ can_batch_sorted_merge(PlannerInfo *root, CompressionInfo *info, const Chunk *ch
  * directly under the gather (merge) node and the per-chunk sorting are not used in parallel plans.
  * To save planning time, we therefore refrain from adding them.
  */
-static void
-add_chunk_sorted_paths(PlannerInfo *root, RelOptInfo *chunk_rel, const Hypertable *ht,
-					   Index ht_relid, Path *path, Path *compressed_path)
+static Path *
+make_chunk_sorted_path(PlannerInfo *root, RelOptInfo *chunk_rel, Path *path, Path *compressed_path,
+					   const SortInfo *sort_info)
 {
-	if (root->query_pathkeys == NIL)
-		return;
+	/*
+	 * We don't have a useful sort order after decompression.
+	 */
+	if (sort_info->decompressed_sort_pathkeys == NIL)
+	{
+		return NULL;
+	}
 
 	/* We are only interested in regular (i.e., non index) paths */
 	if (!IsA(compressed_path, Path))
-		return;
-
-	/* Copy the decompress chunk path because the original can be recycled in add_path, and our
-	 * sorted path must be independent. */
-	if (!ts_is_decompress_chunk_path(path))
-		return;
-
-	DecompressChunkPath *decompress_chunk_path =
-		copy_decompress_chunk_path((DecompressChunkPath *) path);
-
-	/* Iterate over the sort_pathkeys and generate all possible useful sorting */
-	List *useful_pathkeys = NIL;
-	ListCell *lc;
-	foreach (lc, root->query_pathkeys)
 	{
-		PathKey *pathkey = (PathKey *) lfirst(lc);
-		EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+		return NULL;
+	}
 
-		if (pathkey_ec->ec_has_volatile)
-			return;
+	Assert(ts_is_decompress_chunk_path(path));
 
-		Expr *em_expr = find_em_expr_for_rel(pathkey_ec, chunk_rel);
+	/*
+	 * We should be given an unsorted DecompressChunk path.
+	 */
+	Assert(path->pathkeys == NIL);
 
-		/* No em expression found for our rel */
-		if (!em_expr)
-			return;
+	/*
+	 * Create the sorted path for these useful_pathkeys. Copy the decompress
+	 * chunk path because the original can be recycled in add_path, and our
+	 * sorted path must be independent.
+	 */
+	DecompressChunkPath *path_copy = copy_decompress_chunk_path((DecompressChunkPath *) path);
 
-		/* We are only interested in sorting if this is a var */
-		if (!IsA(em_expr, Var))
-			return;
+	/*
+	 * Sorting might require a projection to evaluate the sorting keys. It is
+	 * added during Plan creation by prepare_sort_from_pathkeys(). However, we
+	 * must account for the costs of projection already at the Path stage.
+	 * One synthetic example is calculating min(x1 + x2 + ....), where the argument
+	 * of min() is a heavy expression. We choose between normal aggregation and a
+	 * special optimization for min() added by build_minmax_path(): an InitPlan
+	 * that does ORDER BY + LIMIT 1. The aggregate costs always account
+	 * for calculating the argument expression (see get_agg_clause_costs()). The
+	 * sorting must as well, otherwise the sorting plan will always have lower
+	 * costs, even when it's suboptimal in practice. The sorting cost with
+	 * LIMIT 1 is essentially linear in the number of input tuples (see
	 * cost_tuplesort()).
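+	 *
+	 * (A hypothetical illustration: for min(x1 + x2) over N rows, the aggregate
+	 * cost includes N evaluations of x1 + x2 via get_agg_clause_costs(), so the
+	 * competing "ORDER BY x1 + x2 LIMIT 1" plan must be charged for the same N
+	 * evaluations on top of the sort comparisons, or it would always look
+	 * cheaper on paper.)
+	 *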
+ * There is another complication: normally, the cost of expressions in + * targetlist is accounted for by the PathTarget.cost. However, the relation + * targetlists don't have the argument expression and only have the plain + * source Vars used there. The expression is added only later by + * apply_scanjoin_target_to_paths(), after we have already chosen the best + * path. Because of this, we have to account for it here in a hacky way. + * For further improvements, we might research what the Postgres declarative + * partitioning code does for this case, because it must have a similar + * problem. + */ + path_copy->custom_path.path.startup_cost += sort_info->decompressed_sort_pathkeys_cost.startup; + path_copy->custom_path.path.total_cost += + path_copy->custom_path.path.rows * sort_info->decompressed_sort_pathkeys_cost.per_tuple; - useful_pathkeys = lappend(useful_pathkeys, pathkey); + Path *sorted_path = (Path *) create_sort_path(root, + chunk_rel, + (Path *) path_copy, + sort_info->decompressed_sort_pathkeys, + root->limit_tuples); - /* Create the sorted path for these useful_pathkeys */ - if (!pathkeys_contained_in(useful_pathkeys, - decompress_chunk_path->custom_path.path.pathkeys)) - { - Path *sorted_path = - (Path *) create_sort_path(root, - chunk_rel, - &decompress_chunk_path->custom_path.path, - list_copy(useful_pathkeys), /* useful_pathkeys is modified - in each iteration */ - root->limit_tuples); - - add_path(chunk_rel, sorted_path); - } - } + return sorted_path; } -#define IS_UPDL_CMD(parse) \ - ((parse)->commandType == CMD_UPDATE || (parse)->commandType == CMD_DELETE) +static List *build_on_single_compressed_path(PlannerInfo *root, const Chunk *chunk, + RelOptInfo *chunk_rel, Path *compressed_path, + bool add_uncompressed_part, + List *uncompressed_table_pathlist, + const SortInfo *sort_info, + const CompressionInfo *compression_info); + void ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, const Hypertable *ht, const Chunk *chunk) { - RelOptInfo *compressed_rel; - ListCell *lc; - Index ht_relid = 0; - PlannerInfo *proot; - bool consider_partial = ts_chunk_is_partial(chunk); - /* * For UPDATE/DELETE commands, the executor decompresses and brings the rows into * the uncompressed chunk. Therefore, it's necessary to add the scan on the * uncompressed portion. */ + bool add_uncompressed_part = ts_chunk_is_partial(chunk); if (ts_chunk_is_compressed(chunk) && ts_cm_functions->decompress_target_segments && - !consider_partial) + !add_uncompressed_part) { - for (proot = root->parent_root; proot != NULL && !consider_partial; + for (PlannerInfo *proot = root->parent_root; proot != NULL && !add_uncompressed_part; proot = proot->parent_root) { /* @@ -832,9 +763,13 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, con * and the DML target relation are one and the same. But these kinds of queries * should be rare. 
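+	 *
+	 * (A hypothetical example of such a query:
+	 *   UPDATE metrics SET value = 0
+	 *   WHERE value > (SELECT avg(value) FROM metrics);
+	 * here the subquery scans the same chunks that are the UPDATE target.)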
*/ - if (IS_UPDL_CMD(proot->parse)) + if (proot->parse->commandType == CMD_UPDATE || proot->parse->commandType == CMD_DELETE +#if PG15_GE + || proot->parse->commandType == CMD_MERGE +#endif + ) { - consider_partial = true; + add_uncompressed_part = true; } } } @@ -846,12 +781,13 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, con (compression_info->chunk_rel->reloptkind == RELOPT_BASEREL && ts_rte_is_marked_for_expansion(compression_info->chunk_rte))); - SortInfo sort_info = build_sortinfo(chunk, chunk_rel, compression_info, root->query_pathkeys); + SortInfo sort_info = + build_sortinfo(root, chunk, chunk_rel, compression_info, root->query_pathkeys); Assert(chunk->fd.compressed_chunk_id > 0); - List *initial_pathlist = chunk_rel->pathlist; - List *initial_partial_pathlist = chunk_rel->partial_pathlist; + List *uncompressed_table_pathlist = chunk_rel->pathlist; + List *uncompressed_table_parallel_pathlist = chunk_rel->partial_pathlist; chunk_rel->pathlist = NIL; chunk_rel->partial_pathlist = NIL; @@ -861,14 +797,29 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, con chunk, chunk_rel, sort_info.needs_sequence_num); - compressed_rel = compression_info->compressed_rel; + + if (sort_info.use_compressed_sort) + { + sort_info.required_compressed_pathkeys = + build_compressed_scan_pathkeys(&sort_info, + root, + root->query_pathkeys, + compression_info); + } + + RelOptInfo *compressed_rel = compression_info->compressed_rel; compressed_rel->consider_parallel = chunk_rel->consider_parallel; /* translate chunk_rel->baserestrictinfo */ - pushdown_quals(root, compression_info->settings, chunk_rel, compressed_rel, consider_partial); + pushdown_quals(root, + compression_info->settings, + chunk_rel, + compressed_rel, + add_uncompressed_part); set_baserel_size_estimates(root, compressed_rel); double new_row_estimate = compressed_rel->rows * TARGET_COMPRESSED_BATCH_SIZE; + Index ht_relid = 0; if (!compression_info->single_chunk) { /* adjust the parent's estimate by the diff of new and old estimate */ @@ -883,143 +834,158 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, con create_compressed_scan_paths(root, compressed_rel, compression_info, &sort_info); - /* compute parent relids of the chunk and use it to filter paths*/ - Relids parent_relids = NULL; - if (!compression_info->single_chunk) - parent_relids = find_childrel_parents(root, chunk_rel); - /* create non-parallel paths */ - foreach (lc, compressed_rel->pathlist) + ListCell *compressed_cell; + foreach (compressed_cell, compressed_rel->pathlist) { - Path *compressed_path = lfirst(lc); - + Path *compressed_path = lfirst(compressed_cell); + List *decompressed_paths = build_on_single_compressed_path(root, + chunk, + chunk_rel, + compressed_path, + add_uncompressed_part, + uncompressed_table_pathlist, + &sort_info, + compression_info); /* - * We skip any BitmapScan parameterized paths here as supporting - * those would require fixing up the internal scan. Since we - * currently do not do this BitmapScans would be generated - * when we have a parameterized path on a compressed column - * that would have invalid references due to our - * EquivalenceClasses. + * Add the paths to the chunk relation. 
*/ - if (IsA(compressed_path, BitmapHeapPath) && compressed_path->param_info) - continue; + ListCell *decompressed_cell; + foreach (decompressed_cell, decompressed_paths) + { + Path *path = lfirst(decompressed_cell); + add_path(chunk_rel, path); + } + } + /* create parallel paths */ + List *uncompressed_paths_with_parallel = + list_concat(uncompressed_table_parallel_pathlist, uncompressed_table_pathlist); + foreach (compressed_cell, compressed_rel->partial_pathlist) + { + Path *compressed_path = lfirst(compressed_cell); + List *decompressed_paths = build_on_single_compressed_path(root, + chunk, + chunk_rel, + compressed_path, + add_uncompressed_part, + uncompressed_paths_with_parallel, + &sort_info, + compression_info); /* - * Filter out all paths that try to JOIN the compressed chunk on the - * hypertable or the uncompressed chunk - * Ideally, we wouldn't create these paths in the first place. - * However, create_join_clause code is called by PG while generating paths for the - * compressed_rel via generate_implied_equalities_for_column. - * create_join_clause ends up creating rinfo's between compressed_rel and ht because - * PG does not know that compressed_rel is related to ht in anyway. - * The parent-child relationship between chunk_rel and ht is known - * to PG and so it does not try to create meaningless rinfos for that case. + * Add the paths to the chunk relation. */ - if (compressed_path->param_info != NULL) + ListCell *decompressed_cell; + foreach (decompressed_cell, decompressed_paths) { - if (bms_is_member(chunk_rel->relid, compressed_path->param_info->ppi_req_outer)) - continue; - /* check if this is path made with references between - * compressed_rel + hypertable or a nesting subquery. - * The latter can happen in the case of UNION queries. see github 2917. This - * happens since PG is not aware that the nesting - * subquery that references the hypertable is a parent of compressed_rel as well. - */ - if (bms_overlap(parent_relids, compressed_path->param_info->ppi_req_outer)) - continue; + Path *path = lfirst(decompressed_cell); + add_partial_path(chunk_rel, path); + } + } - ListCell *lc_ri; - bool references_compressed = false; - /* - * Check if this path is parameterized on a compressed - * column. Ideally those paths wouldn't be generated - * in the first place but since we create compressed - * EquivalenceMembers for all EquivalenceClasses these - * Paths can happen and will fail at execution since - * the left and right side of the expression are not - * compatible. Therefore we skip any Path that is - * parameterized on a compressed column here. 
-			 */
-			foreach (lc_ri, compressed_path->param_info->ppi_clauses)
-			{
-				RestrictInfo *ri = lfirst_node(RestrictInfo, lc_ri);
+	/* the chunk_rel now owns the paths, remove them from the compressed_rel so they can't be freed
+	 * if it's planned */
+	compressed_rel->pathlist = NIL;
+	compressed_rel->partial_pathlist = NIL;
-				if (ri->right_em && IsA(ri->right_em->em_expr, Var) &&
-					(Index) castNode(Var, ri->right_em->em_expr)->varno ==
-						compression_info->compressed_rel->relid)
-				{
-					Var *var = castNode(Var, ri->right_em->em_expr);
-					if (is_compressed_column(compression_info, var->vartype))
-					{
-						references_compressed = true;
-						break;
-					}
-				}
-				if (ri->left_em && IsA(ri->left_em->em_expr, Var) &&
-					(Index) castNode(Var, ri->left_em->em_expr)->varno ==
-						compression_info->compressed_rel->relid)
-				{
-					Var *var = castNode(Var, ri->left_em->em_expr);
-					if (is_compressed_column(compression_info, var->vartype))
-					{
-						references_compressed = true;
-						break;
-					}
-				}
-			}
-			if (references_compressed)
-				continue;
-		}
+	/*
+	 * Remove the compressed_rel from planner arrays to prevent it from being
+	 * referenced again.
+	 */
+	root->simple_rel_array[compressed_rel->relid] = NULL;
+	root->append_rel_array[compressed_rel->relid] = NULL;
 
-		Path *chunk_path =
-			(Path *) decompress_chunk_path_create(root, compression_info, 0, compressed_path);
+	/* We should never get into the situation with no viable paths. */
+	Ensure(chunk_rel->pathlist, "could not create decompression path");
+}
 
-		/*
-		 * Create a path for the batch sorted merge optimization. This optimization performs a
-		 * merge append of the involved batches by using a binary heap and preserving the
-		 * compression order. This optimization is only taken into consideration if we can't push
-		 * down the sort to the compressed chunk. If we can push down the sort, the batches can be
-		 * directly consumed in this order and we don't need to use this optimization.
-		 */
-		DecompressChunkPath *batch_merge_path = NULL;
+/*
+ * Add various decompression paths that are possible based on the given
+ * compressed path.
+ */
+static List *
+build_on_single_compressed_path(PlannerInfo *root, const Chunk *chunk, RelOptInfo *chunk_rel,
+								Path *compressed_path, bool add_uncompressed_part,
+								List *uncompressed_table_pathlist, const SortInfo *sort_info,
+								const CompressionInfo *compression_info)
+{
+	/*
+	 * We skip any BitmapScan parameterized paths here as supporting
+	 * those would require fixing up the internal scan. Since we
+	 * currently do not do this, BitmapScans would be generated
+	 * when we have a parameterized path on a compressed column
+	 * that would have invalid references due to our
+	 * EquivalenceClasses.
+	 */
+	if (IsA(compressed_path, BitmapHeapPath) && compressed_path->param_info)
+		return NIL;
 
-		if (ts_guc_enable_decompression_sorted_merge && !sort_info.can_pushdown_sort)
+	/*
+	 * Filter out all paths that try to JOIN the compressed chunk on the
+	 * hypertable or the uncompressed chunk.
+	 * Ideally, we wouldn't create these paths in the first place.
+	 * However, create_join_clause code is called by PG while generating paths for the
+	 * compressed_rel via generate_implied_equalities_for_column.
+	 * create_join_clause ends up creating rinfos between compressed_rel and ht because
+	 * PG does not know that compressed_rel is related to ht in any way.
+	 * The parent-child relationship between chunk_rel and ht is known
+	 * to PG and so it does not try to create meaningless rinfos for that case.
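+	 *
+	 * As a hypothetical illustration, a compressed-chunk path parameterized
+	 * by the hypertable parent would amount to joining the compressed data
+	 * back to its own parent, which is meaningless; such paths are rejected
+	 * below.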
+	 */
+	if (compressed_path->param_info != NULL)
+	{
+		if (bms_is_member(chunk_rel->relid, compressed_path->param_info->ppi_req_outer))
+			return NIL;
+
+		/* check if this path is made with references between
+		 * compressed_rel + hypertable or a nesting subquery.
+		 * The latter can happen in the case of UNION queries. see github 2917. This
+		 * happens since PG is not aware that the nesting
+		 * subquery that references the hypertable is a parent of compressed_rel as well.
+		 */
+		if (bms_overlap(compression_info->parent_relids,
+						compressed_path->param_info->ppi_req_outer))
 		{
-			MergeBatchResult merge_result = can_batch_sorted_merge(root, compression_info, chunk);
-			if (merge_result != MERGE_NOT_POSSIBLE)
-			{
-				batch_merge_path = copy_decompress_chunk_path((DecompressChunkPath *) chunk_path);
-
-				batch_merge_path->reverse = (merge_result != SCAN_FORWARD);
-				batch_merge_path->batch_sorted_merge = true;
-
-				/* The segment by optimization is only enabled if it can deliver the tuples in the
-				 * same order as the query requested it. So, we can just copy the pathkeys of the
-				 * query here.
-				 */
-				batch_merge_path->custom_path.path.pathkeys = root->query_pathkeys;
-				cost_batch_sorted_merge(root, compression_info, batch_merge_path, compressed_path);
-
-				/* If the chunk is partially compressed, prepare the path only and add it later
-				 * to a merge append path when we are able to generate the ordered result for the
-				 * compressed and uncompressed part of the chunk.
-				 */
-				if (!consider_partial)
-					add_path(chunk_rel, &batch_merge_path->custom_path.path);
-			}
+			return NIL;
 		}
+	}
+
+	Path *chunk_path_no_sort =
+		(Path *) decompress_chunk_path_create(root, compression_info, compressed_path);
+	List *decompressed_paths = list_make1(chunk_path_no_sort);
 
-		/* If we can push down the sort below the DecompressChunk node, we set the pathkeys of
-		 * the decompress node to the query pathkeys, while remembering the compressed_pathkeys
-		 * corresponding to those query_pathkeys. We will determine whether to put a sort
-		 * between the decompression node and the scan during plan creation */
-		if (sort_info.can_pushdown_sort)
+	/*
+	 * If we can push down the sort below the DecompressChunk node, we set the pathkeys of
+	 * the decompress node to the query pathkeys, while remembering the compressed_pathkeys
+	 * corresponding to those query_pathkeys. We will determine whether to put a sort
+	 * between the decompression node and the scan during plan creation.
+	 */
+	if (sort_info->use_compressed_sort)
+	{
+		if (pathkeys_contained_in(sort_info->required_compressed_pathkeys,
+								  compressed_path->pathkeys))
 		{
+			/*
+			 * The compressed path already has the required ordering. Modify
+			 * the no-sorting path we just created above in place.
+			 */
+			DecompressChunkPath *path = (DecompressChunkPath *) chunk_path_no_sort;
+			path->reverse = sort_info->reverse;
+			path->needs_sequence_num = sort_info->needs_sequence_num;
+			path->required_compressed_pathkeys = sort_info->required_compressed_pathkeys;
+			path->custom_path.path.pathkeys = root->query_pathkeys;
+		}
+		else
+		{
+			/*
+			 * We must sort the underlying compressed path to get the
+			 * required ordering.
+			 * Make a copy of the no-sorting path and modify it
+			 * accordingly.
+			 */
 			DecompressChunkPath *path_copy =
-				copy_decompress_chunk_path((DecompressChunkPath *) chunk_path);
-			path_copy->reverse = sort_info.reverse;
-			path_copy->needs_sequence_num = sort_info.needs_sequence_num;
-			path_copy->required_compressed_pathkeys = sort_info.required_compressed_pathkeys;
+				copy_decompress_chunk_path((DecompressChunkPath *) chunk_path_no_sort);
+			path_copy->reverse = sort_info->reverse;
+			path_copy->needs_sequence_num = sort_info->needs_sequence_num;
+			path_copy->required_compressed_pathkeys = sort_info->required_compressed_pathkeys;
 			path_copy->custom_path.path.pathkeys = root->query_pathkeys;
 
 			/*
@@ -1028,205 +994,175 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, con
 			 * creation. Examples of this in: create_merge_append_path &
 			 * create_merge_append_plan
 			 */
-			if (!pathkeys_contained_in(sort_info.required_compressed_pathkeys,
-									   compressed_path->pathkeys))
-			{
-				Path sort_path; /* dummy for result of cost_sort */
-
-				cost_sort(&sort_path,
-						  root,
-						  sort_info.required_compressed_pathkeys,
-						  compressed_path->total_cost,
-						  compressed_path->rows,
-						  compressed_path->pathtarget->width,
-						  0.0,
-						  work_mem,
-						  -1);
-
-				cost_decompress_chunk(root, &path_copy->custom_path.path, &sort_path);
-			}
+			Path sort_path; /* dummy for result of cost_sort */
+
+			cost_sort(&sort_path,
+					  root,
+					  sort_info->required_compressed_pathkeys,
+					  compressed_path->total_cost,
+					  compressed_path->rows,
+					  compressed_path->pathtarget->width,
+					  0.0,
+					  work_mem,
+					  -1);
 
-			chunk_path = &path_copy->custom_path.path;
+			cost_decompress_chunk(root, &path_copy->custom_path.path, &sort_path);
+
+			decompressed_paths = lappend(decompressed_paths, path_copy);
 		}
+	}
 
-		/*
-		 * If this is a partially compressed chunk we have to combine data
-		 * from compressed and uncompressed chunk.
-		 */
-		if (consider_partial)
-		{
-			Bitmapset *req_outer = PATH_REQ_OUTER(chunk_path);
-			Path *uncompressed_path =
-				get_cheapest_path_for_pathkeys(initial_pathlist, NIL, req_outer, TOTAL_COST, false);
+	/*
+	 * Create a path for the batch sorted merge optimization. This optimization
+	 * performs a sorted merge of the involved batches by using a binary heap
+	 * and preserving the compression order. This optimization is only
+	 * considered if we can't push down the sort to the compressed chunk. If we
+	 * can push down the sort, the batches can be directly consumed in this
+	 * order and we don't need to use this optimization.
+	 */
+	if (sort_info->use_batch_sorted_merge && ts_guc_enable_decompression_sorted_merge)
+	{
+		Assert(!sort_info->use_compressed_sort);
 
-			/*
-			 * All children of an append path are required to have the same parameterization
-			 * so we reparameterize here when we couldn't get a path with the parameterization
-			 * we need. Reparameterization should always succeed here since uncompressed_path
-			 * should always be a scan.
-			 */
-			if (!bms_equal(req_outer, PATH_REQ_OUTER(uncompressed_path)))
-			{
-				uncompressed_path = reparameterize_path(root, uncompressed_path, req_outer, 1.0);
-				if (!uncompressed_path)
-					continue;
-			}
+		DecompressChunkPath *path_copy =
+			copy_decompress_chunk_path((DecompressChunkPath *) chunk_path_no_sort);
 
-			/* If we were able to generate a batch merge path, create a merge append path
-			 * that combines the result of the compressed and uncompressed part of the chunk. The
-			 * uncompressed part will be sorted, the batch_merge_path is already properly sorted.
-			 */
-			if (batch_merge_path != NULL)
-			{
-				chunk_path = (Path *) create_merge_append_path(root,
-															   chunk_rel,
-															   list_make2(batch_merge_path,
-																		  uncompressed_path),
-															   root->query_pathkeys,
-															   req_outer);
-			}
-			else
-			{
-				/* Check all pathkey components can be satisfied by current chunk */
-				List *pathkeys = NIL;
-				ListCell *lc;
-				foreach (lc, root->query_pathkeys)
-				{
-					PathKey *pathkey = (PathKey *) lfirst(lc);
-					EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
+		path_copy->reverse = sort_info->reverse;
+		path_copy->batch_sorted_merge = true;
 
-					Expr *em_expr = find_em_expr_for_rel(pathkey_ec, chunk_rel);
+		/* The segmentby optimization is only enabled if it can deliver the tuples in the
		 * same order as the query requested. So we can just copy the pathkeys of the
+		 * query here.
+		 */
+		path_copy->custom_path.path.pathkeys = root->query_pathkeys;
+		cost_batch_sorted_merge(root, compression_info, path_copy, compressed_path);
 
-					/* No em expression found for our rel */
-					if (!em_expr)
-						break;
+		decompressed_paths = lappend(decompressed_paths, path_copy);
+	}
 
-					pathkeys = lappend(pathkeys, pathkey);
-				}
-				/*
-				 * Ideally, we would like for this to be a MergeAppend path.
-				 * However, accumulate_append_subpath will cut out MergeAppend
-				 * and directly add its children, so we have to combine the children
-				 * into a MergeAppend node later, at the chunk append level.
-				 */
-				chunk_path =
-					(Path *) create_append_path(root,
-												chunk_rel,
-												list_make2(chunk_path, uncompressed_path),
-												NIL /* partial paths */,
-												pathkeys,
-												req_outer,
-												0,
-												false,
-												chunk_path->rows + uncompressed_path->rows);
-			}
+	/*
+	 * Also try an explicit sort after decompression if we couldn't push down
+	 * the sort. Don't do this for parallel plans, because in that case it is
+	 * typically done with a Sort under the Gather node. This splits the Sort
+	 * into per-worker buckets, so splitting the buckets further per-chunk is
+	 * less important.
+	 */
+	if (!sort_info->use_compressed_sort && chunk_path_no_sort->parallel_workers == 0)
+	{
+		Path *sort_above_chunk =
+			make_chunk_sorted_path(root, chunk_rel, chunk_path_no_sort, compressed_path, sort_info);
+		if (sort_above_chunk != NULL)
+		{
+			decompressed_paths = lappend(decompressed_paths, sort_above_chunk);
 		}
+	}
+
+	if (!add_uncompressed_part)
+	{
+		/*
+		 * If the chunk has only the compressed part, we're done.
+		 */
+		return decompressed_paths;
+	}
 
-		/* Add useful sorted versions of the decompress path */
-		add_chunk_sorted_paths(root, chunk_rel, ht, ht_relid, chunk_path, compressed_path);
+	/*
+	 * This is a partially compressed chunk, so we have to combine data from
+	 * the compressed and the uncompressed parts of the chunk.
+	 */
+	List *combined_paths = NIL;
+	Bitmapset *req_outer = PATH_REQ_OUTER(chunk_path_no_sort);
+	Path *uncompressed_path = get_cheapest_path_for_pathkeys(uncompressed_table_pathlist,
+															 NIL,
+															 req_outer,
+															 TOTAL_COST,
+															 false);
 
-		/* this has to go after the path is copied for the ordered path since path can get freed
-		 * in add_path */
-		add_path(chunk_rel, chunk_path);
+	/*
+	 * All children of an append path are required to have the same parameterization
+	 * so we reparameterize here when we couldn't get a path with the parameterization
+	 * we need. Reparameterization should always succeed here since uncompressed_path
+	 * should always be a scan.
+	 */
+	if (!bms_equal(req_outer, PATH_REQ_OUTER(uncompressed_path)))
+	{
+		uncompressed_path = reparameterize_path(root, uncompressed_path, req_outer, 1.0);
+		if (!uncompressed_path)
+			return NIL;
 	}
 
-	/* the chunk_rel now owns the paths, remove them from the compressed_rel so they can't be
-	 * freed if it's planned */
-	compressed_rel->pathlist = NIL;
-	/* create parallel paths */
-	if (compressed_rel->consider_parallel)
+	ListCell *lc;
+	foreach (lc, decompressed_paths)
 	{
-		foreach (lc, compressed_rel->partial_pathlist)
+		/*
+		 * Combine the decompressed path with the uncompressed part of the
+		 * chunk, using either MergeAppend or plain Append, depending on
+		 * whether the path is sorted.
+		 *
+		 * Another consideration is parallel plans. Postgres currently doesn't
+		 * use MergeAppend under GatherMerge, i.e. as part of parallel plans.
+		 * This is mostly relevant to the append over chunks which is created by
+		 * Postgres. Here we are creating a MergeAppend for a partial chunk;
+		 * parallelizing it by itself is probably less important, so in this
+		 * case we just create a plain Append instead of MergeAppend even for
+		 * ordered chunk paths.
+		 */
+		Path *combined_path = NULL;
+		Path *decompression_path = lfirst(lc);
+		const int workers =
+			Max(decompression_path->parallel_workers, uncompressed_path->parallel_workers);
+		if (decompression_path->pathkeys == NIL || workers > 0)
 		{
-			Path *compressed_path = lfirst(lc);
-			Path *path;
-			if (compressed_path->param_info != NULL &&
-				(bms_is_member(chunk_rel->relid, compressed_path->param_info->ppi_req_outer) ||
-				 (!compression_info->single_chunk &&
-				  bms_is_member(ht_relid, compressed_path->param_info->ppi_req_outer))))
-				continue;
 			/*
-			 * If this is a partially compressed chunk we have to combine data
-			 * from compressed and uncompressed chunk.
+			 * Append distinguishes parallel and non-parallel paths and uses
+			 * this for cost estimation, so we have to distinguish them here
+			 * as well.
 			 */
-			path = (Path *) decompress_chunk_path_create(root,
-														 compression_info,
-														 compressed_path->parallel_workers,
-														 compressed_path);
+			List *parallel_paths = NIL;
+			List *sequential_paths = NIL;
 
-			if (consider_partial)
+			if (decompression_path->parallel_workers > 0)
 			{
-				Bitmapset *req_outer = PATH_REQ_OUTER(path);
-				Path *uncompressed_path = NULL;
-				bool uncompressed_path_is_partial = true;
-
-				if (initial_partial_pathlist)
-					uncompressed_path = get_cheapest_path_for_pathkeys(initial_partial_pathlist,
-																	   NIL,
-																	   req_outer,
-																	   TOTAL_COST,
-																	   true);
-
-				if (!uncompressed_path)
-				{
-					uncompressed_path = get_cheapest_path_for_pathkeys(initial_pathlist,
-																	   NIL,
-																	   req_outer,
-																	   TOTAL_COST,
-																	   true);
-					uncompressed_path_is_partial = false;
-				}
-
-				/*
-				 * All children of an append path are required to have the same parameterization
-				 * so we reparameterize here when we couldn't get a path with the
-				 * parameterization we need. Reparameterization should always succeed here since
-				 * uncompressed_path should always be a scan.
-				 */
-				if (!bms_equal(req_outer, PATH_REQ_OUTER(uncompressed_path)))
-				{
-					uncompressed_path =
-						reparameterize_path(root, uncompressed_path, req_outer, 1.0);
-					if (!uncompressed_path)
-						continue;
-				}
-
-				/* uncompressed_path can be a partial or a non-partial path. Categorize the path
-				 * and add it to the proper list of the append path.
*/ - List *partial_path_list = list_make1(path); - List *path_list = NIL; + parallel_paths = lappend(parallel_paths, decompression_path); + } + else + { + sequential_paths = lappend(sequential_paths, decompression_path); + } - if (uncompressed_path_is_partial) - partial_path_list = lappend(partial_path_list, uncompressed_path); - else - path_list = list_make1(uncompressed_path); - - /* Use a parallel aware append to handle non-partial paths properly */ - path = (Path *) create_append_path(root, - chunk_rel, - path_list, - partial_path_list, - NIL /* pathkeys */, - req_outer, - Max(path->parallel_workers, - uncompressed_path->parallel_workers), - true, /* parallel aware */ - path->rows + uncompressed_path->rows); + if (uncompressed_path->parallel_workers > 0) + { + parallel_paths = lappend(parallel_paths, uncompressed_path); + } + else + { + sequential_paths = lappend(sequential_paths, uncompressed_path); } - add_partial_path(chunk_rel, path); + combined_path = + (Path *) create_append_path(root, + chunk_rel, + sequential_paths, + parallel_paths, + /* pathkeys = */ NIL, + req_outer, + workers, + workers > 0, + decompression_path->rows + uncompressed_path->rows); + } + else + { + combined_path = + (Path *) create_merge_append_path(root, + chunk_rel, + list_make2(decompression_path, uncompressed_path), + decompression_path->pathkeys, + req_outer); } - /* the chunk_rel now owns the paths, remove them from the compressed_rel so they can't - * be freed if it's planned */ - compressed_rel->partial_pathlist = NIL; + + combined_paths = lappend(combined_paths, combined_path); } - /* Remove the compressed_rel from the simple_rel_array to prevent it from - * being referenced again. */ - root->simple_rel_array[compressed_rel->relid] = NULL; - /* We should never get in the situation with no viable paths. */ - Ensure(chunk_rel->pathlist, "could not create decompression path"); + return combined_paths; } /* @@ -1797,24 +1733,19 @@ decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, const RelOptInfo *chunk_rel, bool needs_sequence_num) { Index compressed_index = root->simple_rel_array_size; - FormData_chunk compressed_fd = ts_chunk_get_formdata(chunk->fd.compressed_chunk_id); - Oid compressed_reloid = ts_get_relation_relid(NameStr(compressed_fd.schema_name), - NameStr(compressed_fd.table_name), - /* return_invalid = */ false); /* * Add the compressed chunk to the baserel cache. Note that it belongs to * a different hypertable, the internal compression table. 
 	 */
-	Oid compression_hypertable_reloid =
-		ts_hypertable_id_to_relid(compressed_fd.hypertable_id, /* return_invalid = */ false);
-	ts_add_baserel_cache_entry_for_chunk(compressed_reloid,
-										 ts_planner_get_hypertable(compression_hypertable_reloid,
-																   CACHE_FLAG_NONE));
+	ts_add_baserel_cache_entry_for_chunk(
+		info->compressed_reloid,
+		ts_planner_get_hypertable(info->compression_hypertable_reloid, CACHE_FLAG_NONE));
 
 	expand_planner_arrays(root, 1);
 
-	info->compressed_rte =
-		decompress_chunk_make_rte(compressed_reloid, info->chunk_rte->rellockmode, root->parse);
+	info->compressed_rte = decompress_chunk_make_rte(info->compressed_reloid,
+													 info->chunk_rte->rellockmode,
+													 root->parse);
 	root->simple_rte_array[compressed_index] = info->compressed_rte;
 	root->parse->rtable = lappend(root->parse->rtable, info->compressed_rte);
@@ -1866,11 +1797,33 @@ decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, const
 	compressed_rel_setup_equivalence_classes(root, info);
 	/* translate chunk_rel->joininfo for compressed_rel */
 	compressed_rel_setup_joininfo(compressed_rel, info);
+
+	/*
+	 * Force parallel plan creation, see compute_parallel_worker().
+	 * This is not compatible with ts_classify_relation(), but on the other hand
+	 * the compressed chunk rel shouldn't exist anywhere outside of the
+	 * decompression planning; it is removed at the end.
+	 *
+	 * This is not needed for a direct select from a single chunk, in which case
+	 * the chunk reloptkind will be RELOPT_BASEREL.
+	 */
+	if (chunk_rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
+	{
+		compressed_rel->reloptkind = RELOPT_OTHER_MEMBER_REL;
+
+		/*
+		 * We have to minimally initialize the append relation info for the
+		 * compressed chunks, so that generate_implied_equalities() works.
+		 * Only the parent hypertable relindex is needed.
+		 */
+		root->append_rel_array[compressed_rel->relid] = makeNode(AppendRelInfo);
+		root->append_rel_array[compressed_rel->relid]->parent_relid = info->ht_rel->relid;
+		compressed_rel->top_parent_relids = chunk_rel->top_parent_relids;
+	}
 }
 
 static DecompressChunkPath *
-decompress_chunk_path_create(PlannerInfo *root, CompressionInfo *info, int parallel_workers,
-							 Path *compressed_path)
+decompress_chunk_path_create(PlannerInfo *root, const CompressionInfo *info, Path *compressed_path)
 {
 	DecompressChunkPath *path;
 
@@ -1909,9 +1862,10 @@ decompress_chunk_path_create(PlannerInfo *root, CompressionInfo *info, int paral
 	 * in a parallel plan we only set parallel_safe to true
 	 * when parallel_workers is greater than 0 which is only
	 * the case when creating partial paths.
 	 */
-	path->custom_path.path.parallel_safe = parallel_workers > 0;
-	path->custom_path.path.parallel_workers = parallel_workers;
 	path->custom_path.path.parallel_aware = false;
+	path->custom_path.path.parallel_safe =
+		info->chunk_rel->consider_parallel && compressed_path->parallel_safe;
+	path->custom_path.path.parallel_workers = compressed_path->parallel_workers;
 
 	path->custom_path.custom_paths = list_make1(compressed_path);
 	path->reverse = false;
@@ -1926,8 +1880,8 @@ decompress_chunk_path_create(PlannerInfo *root, CompressionInfo *info, int paral
  */
 static void
-create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel, CompressionInfo *info,
-							 SortInfo *sort_info)
+create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel,
+							 const CompressionInfo *compression_info, const SortInfo *sort_info)
 {
 	Path *compressed_path;
 
@@ -1941,27 +1895,26 @@ create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel, Comp
 	compressed_path = create_seqscan_path(root, compressed_rel, NULL, 0);
 	add_path(compressed_rel, compressed_path);
 
-	/* create parallel scan path */
+	/*
+	 * Create parallel seq scan path.
+	 * We marked the compressed rel as RELOPT_OTHER_MEMBER_REL when creating it,
+	 * so we should get a nonzero number of parallel workers even for small
+	 * tables, so that they don't prevent parallelism in the entire append plan.
+	 * See compute_parallel_worker(). This also applies to the creation of
+	 * index paths below.
+	 */
 	if (compressed_rel->consider_parallel)
 	{
-		/* Almost the same functionality as ts_create_plain_partial_paths.
-		 *
-		 * However, we also create a partial path for small chunks to allow PostgreSQL to choose
-		 * a parallel plan for decompression. If no partial path is present for a single chunk,
-		 * PostgreSQL will not use a parallel plan and all chunks are decompressed by a
-		 * non-parallel plan (even if there are a few bigger chunks).
-		 */
 		int parallel_workers = compute_parallel_worker(compressed_rel,
													   compressed_rel->pages,
													   -1,
													   max_parallel_workers_per_gather);
 
-		/* Use at least one worker */
-		parallel_workers = Max(parallel_workers, 1);
-
-		/* Add an unordered partial path based on a parallel sequential scan. */
-		add_partial_path(compressed_rel,
-						 create_seqscan_path(root, compressed_rel, NULL, parallel_workers));
+		if (parallel_workers > 0)
+		{
+			add_partial_path(compressed_rel,
+							 create_seqscan_path(root, compressed_rel, NULL, parallel_workers));
+		}
 	}
 
 	/*
@@ -1976,7 +1929,7 @@ create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel, Comp
 	bool old_bitmapscan = enable_bitmapscan;
 	enable_bitmapscan = false;
 
-	if (sort_info->can_pushdown_sort)
+	if (sort_info->use_compressed_sort)
 	{
 		/*
 		 * If we can push down sort below decompression we temporarily switch
@@ -1985,8 +1938,7 @@ create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel, Comp
 		 */
 		List *orig_pathkeys = root->query_pathkeys;
 		List *orig_eq_classes = root->eq_classes;
-		Bitmapset *orig_eclass_indexes = info->compressed_rel->eclass_indexes;
-		build_compressed_scan_pathkeys(sort_info, root, root->query_pathkeys, info);
+		Bitmapset *orig_eclass_indexes = compression_info->compressed_rel->eclass_indexes;
 		root->query_pathkeys = sort_info->required_compressed_pathkeys;
 
 		/* We can optimize iterating over EquivalenceClasses by reducing them to
@@ -1996,24 +1948,24 @@ create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel, Comp
 		 *
		 * Clauseless joins work fine since they don't rely on eclass_indexes.
*/ - if (!info->chunk_rel->has_eclass_joins) + if (!compression_info->chunk_rel->has_eclass_joins) { int i = -1; List *required_eq_classes = NIL; - while ((i = bms_next_member(info->compressed_rel->eclass_indexes, i)) >= 0) + while ((i = bms_next_member(compression_info->compressed_rel->eclass_indexes, i)) >= 0) { EquivalenceClass *cur_ec = (EquivalenceClass *) list_nth(root->eq_classes, i); required_eq_classes = lappend(required_eq_classes, cur_ec); } root->eq_classes = required_eq_classes; - info->compressed_rel->eclass_indexes = NULL; + compression_info->compressed_rel->eclass_indexes = NULL; } check_index_predicates(root, compressed_rel); create_index_paths(root, compressed_rel); root->query_pathkeys = orig_pathkeys; root->eq_classes = orig_eq_classes; - info->compressed_rel->eclass_indexes = orig_eclass_indexes; + compression_info->compressed_rel->eclass_indexes = orig_eclass_indexes; } else { @@ -2091,8 +2043,8 @@ decompress_chunk_make_rte(Oid compressed_relid, LOCKMODE lockmode, Query *parse) * This will detect Var = Const and Var = Param and set the corresponding bit * in CompressionInfo->chunk_const_segmentby. */ -static void -find_const_segmentby(RelOptInfo *chunk_rel, CompressionInfo *info) +static Bitmapset * +find_const_segmentby(RelOptInfo *chunk_rel, const CompressionInfo *info) { Bitmapset *segmentby_columns = NULL; @@ -2141,7 +2093,103 @@ find_const_segmentby(RelOptInfo *chunk_rel, CompressionInfo *info) } } } - info->chunk_const_segmentby = segmentby_columns; + + return segmentby_columns; +} + +/* + * Returns whether the pathkeys starting at the given offset match the compression + * orderby, and whether the order is reverse. + */ +static bool +match_pathkeys_to_compression_orderby(List *pathkeys, List *chunk_em_exprs, + int starting_pathkey_offset, + const CompressionInfo *compression_info, bool *out_reverse) +{ + int compressed_pk_index = 0; + for (int i = starting_pathkey_offset; i < list_length(pathkeys); i++) + { + compressed_pk_index++; + PathKey *pk = list_nth_node(PathKey, pathkeys, i); + Expr *expr = (Expr *) list_nth(chunk_em_exprs, i); + + if (expr == NULL || !IsA(expr, Var)) + { + return false; + } + + Var *var = castNode(Var, expr); + + if (var->varattno <= 0) + { + return false; + } + + char *column_name = get_attname(compression_info->chunk_rte->relid, var->varattno, false); + int orderby_index = ts_array_position(compression_info->settings->fd.orderby, column_name); + + if (orderby_index != compressed_pk_index) + { + return false; + } + + bool orderby_desc = + ts_array_get_element_bool(compression_info->settings->fd.orderby_desc, orderby_index); + bool orderby_nullsfirst = + ts_array_get_element_bool(compression_info->settings->fd.orderby_nullsfirst, + orderby_index); + + /* + * pk_strategy is either BTLessStrategyNumber (for ASC) or + * BTGreaterStrategyNumber (for DESC) + */ + bool this_pathkey_reverse = false; + if (pk->pk_strategy == BTLessStrategyNumber) + { + if (!orderby_desc && orderby_nullsfirst == pk->pk_nulls_first) + { + this_pathkey_reverse = false; + } + else if (orderby_desc && orderby_nullsfirst != pk->pk_nulls_first) + { + this_pathkey_reverse = true; + } + else + { + return false; + } + } + else if (pk->pk_strategy == BTGreaterStrategyNumber) + { + if (orderby_desc && orderby_nullsfirst == pk->pk_nulls_first) + { + this_pathkey_reverse = false; + } + else if (!orderby_desc && orderby_nullsfirst != pk->pk_nulls_first) + { + this_pathkey_reverse = true; + } + else + { + return false; + } + } + + /* + * first pathkey match determines if this 
is a forward or backward scan;
+		 * any further pathkey items need to have the same direction.
+		 */
+		if (compressed_pk_index == 1)
+		{
+			*out_reverse = this_pathkey_reverse;
+		}
+		else if (this_pathkey_reverse != *out_reverse)
+		{
+			return false;
+		}
+	}
+
+	return true;
 }
 
 /*
@@ -2155,21 +2203,73 @@ find_const_segmentby(RelOptInfo *chunk_rel, CompressionInfo *info)
  * If query pathkeys is shorter than segmentby + compress_orderby pushdown can still be done
  */
 static SortInfo
-build_sortinfo(const Chunk *chunk, RelOptInfo *chunk_rel, CompressionInfo *info, List *pathkeys)
+build_sortinfo(PlannerInfo *root, const Chunk *chunk, RelOptInfo *chunk_rel,
+			   const CompressionInfo *compression_info, List *pathkeys)
 {
-	int pk_index;
-	PathKey *pk;
 	Var *var;
-	Expr *expr;
 	char *column_name;
-	ListCell *lc = list_head(pathkeys);
-	SortInfo sort_info = { .can_pushdown_sort = false, .needs_sequence_num = false };
+	ListCell *lc;
+	SortInfo sort_info = { 0 };
 
-	if (pathkeys == NIL || ts_chunk_is_unordered(chunk))
+	if (pathkeys == NIL)
+	{
+		return sort_info;
+	}
+
+	/*
+	 * Translate the pathkeys to chunk expressions, creating a List of them
+	 * parallel to the pathkeys list, with NULL entries if we didn't find a
+	 * match.
+	 */
+	List *chunk_em_exprs = NIL;
+	foreach (lc, pathkeys)
+	{
+		PathKey *pk = lfirst(lc);
+		EquivalenceClass *ec = pk->pk_eclass;
+		Expr *em_expr = NULL;
+		if (!ec->ec_has_volatile)
+		{
+			em_expr = find_em_expr_for_rel(pk->pk_eclass, compression_info->chunk_rel);
+		}
+		chunk_em_exprs = lappend(chunk_em_exprs, em_expr);
+	}
+	Assert(list_length(chunk_em_exprs) == list_length(pathkeys));
+
+	/* Find the pathkeys we can use for explicitly sorting after decompression. */
+	List *sort_pathkey_exprs = NIL;
+	List *sort_pathkeys = NIL;
+	for (int i = 0; i < list_length(chunk_em_exprs); i++)
+	{
+		PathKey *pk = list_nth_node(PathKey, pathkeys, i);
+		Expr *chunk_em_expr = (Expr *) list_nth(chunk_em_exprs, i);
+		if (chunk_em_expr == NULL)
+		{
+			break;
+		}
+
+		sort_pathkeys = lappend(sort_pathkeys, pk);
+		sort_pathkey_exprs = lappend(sort_pathkey_exprs, chunk_em_expr);
+	}
+
+	if (sort_pathkeys == NIL)
+	{
+		return sort_info;
+	}
+
+	sort_info.decompressed_sort_pathkeys = sort_pathkeys;
+	cost_qual_eval(&sort_info.decompressed_sort_pathkeys_cost, sort_pathkey_exprs, root);
+
+	/*
+	 * Next, check if we can push the sort down to the compressed part.
+	 *
	 * Not possible if the chunk is unordered.
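+	 * An unordered chunk gives no guarantee that its batches still follow the
+	 * configured compress_orderby, so neither sort pushdown nor batch sorted
+	 * merge can rely on the per-batch ordering in that case.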
+ */ + if (ts_chunk_is_unordered(chunk)) return sort_info; /* all segmentby columns need to be prefix of pathkeys */ - if (info->num_segmentby_columns > 0) + int i = 0; + if (compression_info->num_segmentby_columns > 0) { Bitmapset *segmentby_columns; @@ -2177,19 +2277,18 @@ build_sortinfo(const Chunk *chunk, RelOptInfo *chunk_rel, CompressionInfo *info, * initialize segmentby with equality constraints from baserestrictinfo because * those columns dont need to be prefix of pathkeys */ - find_const_segmentby(chunk_rel, info); - segmentby_columns = bms_copy(info->chunk_const_segmentby); + segmentby_columns = bms_copy(compression_info->chunk_const_segmentby); /* * loop over pathkeys until we find one that is not a segmentby column * we keep looping even if we found all segmentby columns in case a * columns appears both in baserestrictinfo and in ORDER BY clause */ - for (; lc != NULL; lc = lnext(pathkeys, lc)) + for (i = 0; i < list_length(pathkeys); i++) { - Assert(bms_num_members(segmentby_columns) <= info->num_segmentby_columns); - pk = lfirst(lc); - expr = find_em_expr_for_rel(pk->pk_eclass, info->chunk_rel); + Assert(bms_num_members(segmentby_columns) <= compression_info->num_segmentby_columns); + + Expr *expr = (Expr *) list_nth(chunk_em_exprs, i); if (expr == NULL || !IsA(expr, Var)) break; @@ -2198,94 +2297,62 @@ build_sortinfo(const Chunk *chunk, RelOptInfo *chunk_rel, CompressionInfo *info, if (var->varattno <= 0) break; - column_name = get_attname(info->chunk_rte->relid, var->varattno, false); - if (!ts_array_is_member(info->settings->fd.segmentby, column_name)) + column_name = get_attname(compression_info->chunk_rte->relid, var->varattno, false); + if (!ts_array_is_member(compression_info->settings->fd.segmentby, column_name)) break; segmentby_columns = bms_add_member(segmentby_columns, var->varattno); } /* - * if pathkeys still has items but we didn't find all segmentby columns - * we cannot push down sort + * If pathkeys still has items, but we didn't find all segmentby columns, + * we cannot satisfy these pathkeys by sorting the compressed chunk table. */ - if (lc != NULL && bms_num_members(segmentby_columns) != info->num_segmentby_columns) + if (i != list_length(pathkeys) && + bms_num_members(segmentby_columns) != compression_info->num_segmentby_columns) + { + /* + * If we didn't have any segmentby columns in pathkeys, try batch sorted merge + * instead. + */ + if (i == 0) + { + sort_info.use_batch_sorted_merge = + match_pathkeys_to_compression_orderby(pathkeys, + chunk_em_exprs, + /* starting_pathkey_offset = */ 0, + compression_info, + &sort_info.reverse); + } return sort_info; + } + } + + if (i == list_length(pathkeys)) + { + /* + * Pathkeys satisfied by sorting the compressed data on segmentby columns. + */ + sort_info.use_compressed_sort = true; + return sort_info; } /* - * if pathkeys includes columns past segmentby columns - * we need sequence_num in the targetlist for ordering + * Pathkeys includes columns past segmentby columns, so we need sequence_num + * in the targetlist for ordering. 
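+ * + * In the hypothetical example above, the 'time' pathkey of ORDER BY + * device_id, time DESC lies past the segmentby prefix, so the + * compressed scan must also be ordered by the batch sequence number + * (the legacy _ts_meta_sequence_num column, when present) within each + * device_id to reproduce the per-batch 'time' ordering.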
*/ - if (lc != NULL) - sort_info.needs_sequence_num = true; + sort_info.needs_sequence_num = true; /* * loop over the rest of pathkeys * this needs to exactly match the configured compress_orderby */ - for (pk_index = 1; lc != NULL; lc = lnext(pathkeys, lc), pk_index++) - { - bool reverse = false; - pk = lfirst(lc); - expr = find_em_expr_for_rel(pk->pk_eclass, info->chunk_rel); - - if (expr == NULL || !IsA(expr, Var)) - return sort_info; - - var = castNode(Var, expr); - - if (var->varattno <= 0) - return sort_info; - - column_name = get_attname(info->chunk_rte->relid, var->varattno, false); - int orderby_index = ts_array_position(info->settings->fd.orderby, column_name); - - if (orderby_index != pk_index) - return sort_info; - - bool orderby_desc = - ts_array_get_element_bool(info->settings->fd.orderby_desc, orderby_index); - bool orderby_nullsfirst = - ts_array_get_element_bool(info->settings->fd.orderby_nullsfirst, orderby_index); - - /* - * pk_strategy is either BTLessStrategyNumber (for ASC) or - * BTGreaterStrategyNumber (for DESC) - */ - if (pk->pk_strategy == BTLessStrategyNumber) - { - if (!orderby_desc && orderby_nullsfirst == pk->pk_nulls_first) - reverse = false; - else if (orderby_desc && orderby_nullsfirst != pk->pk_nulls_first) - reverse = true; - else - return sort_info; - } - else if (pk->pk_strategy == BTGreaterStrategyNumber) - { - if (orderby_desc && orderby_nullsfirst == pk->pk_nulls_first) - reverse = false; - else if (!orderby_desc && orderby_nullsfirst != pk->pk_nulls_first) - reverse = true; - else - return sort_info; - } - - /* - * first pathkey match determines if this is forward or backward scan - * any further pathkey items need to have same direction - */ - if (pk_index == 1) - sort_info.reverse = reverse; - else if (reverse != sort_info.reverse) - return sort_info; - } - - /* all pathkeys should be processed */ - Assert(lc == NULL); + sort_info.use_compressed_sort = match_pathkeys_to_compression_orderby(pathkeys, + chunk_em_exprs, + i, + compression_info, + &sort_info.reverse); - sort_info.can_pushdown_sort = true; return sort_info; } diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.h b/tsl/src/nodes/decompress_chunk/decompress_chunk.h index d224c2a3394..77f2663e1a3 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_chunk.h +++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.h @@ -22,6 +22,10 @@ typedef struct CompressionInfo RangeTblEntry *compressed_rte; RangeTblEntry *ht_rte; + FormData_chunk compressed_fd; + Oid compressed_reloid; + Oid compression_hypertable_reloid; + Oid compresseddata_oid; CompressionSettings *settings; @@ -42,15 +46,15 @@ typedef struct CompressionInfo /* compressed chunk attribute numbers for columns that are compressed */ Bitmapset *compressed_attnos_in_compressed_chunk; - bool single_chunk; /* query on explicit chunk */ - bool has_seq_num; /* legacy sequence number support */ - + bool single_chunk; /* query on explicit chunk */ + bool has_seq_num; /* legacy sequence number support */ + Relids parent_relids; /* relids of the parent hypertable and UNION */ } CompressionInfo; typedef struct DecompressChunkPath { CustomPath custom_path; - CompressionInfo *info; + const CompressionInfo *info; List *required_compressed_pathkeys; bool needs_sequence_num; diff --git a/tsl/src/nodes/decompress_chunk/decompress_context.h b/tsl/src/nodes/decompress_chunk/decompress_context.h index bd12927d77e..830f52f2e50 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_context.h +++ b/tsl/src/nodes/decompress_chunk/decompress_context.h 
@@ -73,7 +73,7 @@ typedef struct DecompressContext List *vectorized_quals_constified; bool reverse; - bool batch_sorted_merge; /* Merge append optimization enabled */ + bool batch_sorted_merge; /* Batch sorted merge optimization enabled. */ bool enable_bulk_decompression; /* diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 18d1ec1e54b..1ffb68bd5a8 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -212,7 +212,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) node->ss.ss_ScanTupleSlot->tts_tupleDescriptor); } } - /* Sort keys should only be present when sorted_merge_append is used */ + /* Sort keys should only be present when batch sorted merge is used. */ Assert(dcontext->batch_sorted_merge == true || list_length(chunk_state->sortinfo) == 0); /* diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 785b846e583..e9675282821 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -252,7 +252,7 @@ static void build_decompression_map(DecompressionMapContext *context, List *compressed_scan_tlist) { DecompressChunkPath *path = context->decompress_path; - CompressionInfo *info = path->info; + const CompressionInfo *info = path->info; /* * Track which normal and metadata columns we were able to find in the * targetlist. @@ -522,7 +522,7 @@ build_decompression_map(DecompressionMapContext *context, List *compressed_scan_ * uncompressed one. Based on replace_nestloop_params */ static Node * -replace_compressed_vars(Node *node, CompressionInfo *info) +replace_compressed_vars(Node *node, const CompressionInfo *info) { if (node == NULL) return NULL; diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index fc5e0ec56db..63d9701ed05 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -1766,35 +1766,18 @@ SELECT compress_chunk(i) FROM show_chunks('f_sensor_data') i; CALL reindex_compressed_hypertable('f_sensor_data'); VACUUM ANALYZE f_sensor_data; -- Encourage use of parallel plans +SET max_parallel_workers_per_gather = 4; +SET min_parallel_index_scan_size = 0; +SET min_parallel_table_scan_size = 0; SET parallel_setup_cost = 0; SET parallel_tuple_cost = 0; -SET min_parallel_table_scan_size TO '0'; -\set explain 'EXPLAIN (VERBOSE, COSTS OFF)' -SHOW min_parallel_table_scan_size; - min_parallel_table_scan_size ------------------------------- - 0 -(1 row) - SHOW max_parallel_workers; max_parallel_workers ---------------------- 8 (1 row) -SHOW max_parallel_workers_per_gather; - max_parallel_workers_per_gather ---------------------------------- - 2 -(1 row) - -SET max_parallel_workers_per_gather = 4; -SHOW max_parallel_workers_per_gather; - max_parallel_workers_per_gather ---------------------------------- - 4 -(1 row) - +\set explain 'EXPLAIN (VERBOSE, COSTS OFF)' -- We disable enable_parallel_append here to ensure -- that we create the same query plan in all PG 14.X versions SET enable_parallel_append = false; @@ -1816,24 +1799,19 @@ SELECT sum(cpu) FROM f_sensor_data; Output: compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1, compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature (12 rows) --- Encourage use of Index Scan -SET enable_seqscan = false; -SET 
enable_indexscan = true; -SET min_parallel_index_scan_size = 0; -SET min_parallel_table_scan_size = 0; CREATE INDEX ON f_sensor_data (time, sensor_id); :explain -SELECT * FROM f_sensor_data WHERE sensor_id > 100; +SELECT * FROM f_sensor_data WHERE sensor_id > 1000; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Gather Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature - Workers Planned: 3 + Workers Planned: 2 -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature -> Parallel Index Scan using compress_hyper_38_74_chunk_sensor_id__ts_meta_min_1__ts_met_idx on _timescaledb_internal.compress_hyper_38_74_chunk Output: compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1, compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature - Index Cond: (compress_hyper_38_74_chunk.sensor_id > 100) + Index Cond: (compress_hyper_38_74_chunk.sensor_id > 1000) (8 rows) RESET enable_parallel_append; @@ -1849,6 +1827,7 @@ FROM generate_series(1700, 1800, 1 ) AS g2(sensor_id) ORDER BY time; +VACUUM ANALYZE f_sensor_data; :explain SELECT sum(cpu) FROM f_sensor_data; QUERY PLAN @@ -1859,10 +1838,6 @@ SELECT sum(cpu) FROM f_sensor_data; Output: (PARTIAL sum(_hyper_37_73_chunk.cpu)) Workers Planned: 4 -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_37_73_chunk.cpu) - -> Parallel Seq Scan on _timescaledb_internal._hyper_37_73_chunk - Output: _hyper_37_73_chunk.cpu -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_37_73_chunk.cpu)) Grouping Policy: all compressed batches @@ -1870,25 +1845,29 @@ SELECT sum(cpu) FROM f_sensor_data; Output: _hyper_37_73_chunk.cpu -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_74_chunk Output: compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1, compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature + -> Partial Aggregate + Output: PARTIAL sum(_hyper_37_73_chunk.cpu) + -> Parallel Seq Scan on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk.cpu (17 rows) :explain -SELECT * FROM f_sensor_data WHERE sensor_id > 100; +SELECT * FROM f_sensor_data WHERE sensor_id > 1000; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Gather Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature Workers Planned: 3 -> Parallel Append - -> Parallel Index Scan using _hyper_37_73_chunk_f_sensor_data_time_sensor_id_idx on _timescaledb_internal._hyper_37_73_chunk - Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, 
_hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature - Index Cond: (_hyper_37_73_chunk.sensor_id > 100) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature - Filter: (_hyper_37_73_chunk.sensor_id > 100) + Filter: (_hyper_37_73_chunk.sensor_id > 1000) -> Parallel Index Scan using compress_hyper_38_74_chunk_sensor_id__ts_meta_min_1__ts_met_idx on _timescaledb_internal.compress_hyper_38_74_chunk Output: compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1, compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature - Index Cond: (compress_hyper_38_74_chunk.sensor_id > 100) + Index Cond: (compress_hyper_38_74_chunk.sensor_id > 1000) + -> Parallel Seq Scan on _timescaledb_internal._hyper_37_73_chunk + Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature + Filter: (_hyper_37_73_chunk.sensor_id > 1000) (13 rows) -- Test non-partial paths below append are not executed multiple times diff --git a/tsl/test/expected/compression_ddl.out b/tsl/test/expected/compression_ddl.out index edb7008c317..72ee6f79f22 100644 --- a/tsl/test/expected/compression_ddl.out +++ b/tsl/test/expected/compression_ddl.out @@ -1527,22 +1527,25 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate - Group Key: _hyper_31_110_chunk.device_id - -> Merge Append - Sort Key: _hyper_31_110_chunk.device_id - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_112_chunk.device_id - -> Index Only Scan using _hyper_31_112_chunk_compression_insert_device_id_time_idx on _hyper_31_112_chunk -(13 rows) + Group Key: _hyper_31_112_chunk.device_id + -> Sort + Sort Key: _hyper_31_112_chunk.device_id + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial GroupAggregate + Group Key: _hyper_31_112_chunk.device_id + -> Parallel Index Only Scan using _hyper_31_112_chunk_compression_insert_device_id_time_idx on _hyper_31_112_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Parallel Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Parallel Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk +(16 rows) SELECT device_id, count(*) FROM compression_insert @@ -1613,25 +1616,28 @@ EXPLAIN (costs off) SELECT device_id, 
count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate - Group Key: _hyper_31_110_chunk.device_id - -> Merge Append - Sort Key: _hyper_31_110_chunk.device_id - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk - -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_114_chunk.device_id - -> Index Only Scan using _hyper_31_114_chunk_compression_insert_device_id_time_idx on _hyper_31_114_chunk -(16 rows) + Group Key: _hyper_31_114_chunk.device_id + -> Sort + Sort Key: _hyper_31_114_chunk.device_id + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial GroupAggregate + Group Key: _hyper_31_114_chunk.device_id + -> Parallel Index Only Scan using _hyper_31_114_chunk_compression_insert_device_id_time_idx on _hyper_31_114_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Parallel Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Parallel Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk + -> Parallel Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk +(19 rows) SELECT device_id, count(*) FROM compression_insert @@ -1702,28 +1708,31 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate - Group Key: _hyper_31_110_chunk.device_id - -> Merge Append - Sort Key: _hyper_31_110_chunk.device_id - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk - -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on 
compress_hyper_32_115_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk - -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_116_chunk.device_id - -> Index Only Scan using _hyper_31_116_chunk_compression_insert_device_id_time_idx on _hyper_31_116_chunk -(19 rows) + Group Key: _hyper_31_116_chunk.device_id + -> Sort + Sort Key: _hyper_31_116_chunk.device_id + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial GroupAggregate + Group Key: _hyper_31_116_chunk.device_id + -> Parallel Index Only Scan using _hyper_31_116_chunk_compression_insert_device_id_time_idx on _hyper_31_116_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Parallel Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Parallel Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk + -> Parallel Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk + -> Parallel Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk +(22 rows) SELECT device_id, count(*) FROM compression_insert @@ -1794,31 +1803,34 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate - Group Key: _hyper_31_110_chunk.device_id - -> Merge Append - Sort Key: _hyper_31_110_chunk.device_id - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk - -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk - -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_118_chunk - -> Index Scan using compress_hyper_32_119_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_119_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_118_chunk.device_id - -> Index Only Scan using _hyper_31_118_chunk_compression_insert_device_id_time_idx on _hyper_31_118_chunk -(22 rows) + Group Key: _hyper_31_118_chunk.device_id + -> Sort + Sort Key: 
_hyper_31_118_chunk.device_id + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Partial GroupAggregate + Group Key: _hyper_31_118_chunk.device_id + -> Parallel Index Only Scan using _hyper_31_118_chunk_compression_insert_device_id_time_idx on _hyper_31_118_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Parallel Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Parallel Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk + -> Parallel Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk + -> Parallel Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_118_chunk + -> Parallel Index Scan using compress_hyper_32_119_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_119_chunk +(25 rows) SELECT device_id, count(*) FROM compression_insert diff --git a/tsl/test/expected/hypercore.out b/tsl/test/expected/hypercore.out index c4ff2450f9a..e45994e3777 100644 --- a/tsl/test/expected/hypercore.out +++ b/tsl/test/expected/hypercore.out @@ -245,17 +245,15 @@ SET enable_indexscan = false; SET timescaledb.enable_transparent_decompression TO 'hypercore'; EXPLAIN (costs off, timing off, summary off) SELECT time, location, device, temp, humidity, jdata FROM :chunk WHERE device < 4 ORDER BY time, device LIMIT 5; - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------- Limit - -> Gather Merge - Workers Planned: 1 - -> Sort - Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - -> Parallel Seq Scan on compress_hyper_2_32_chunk - Filter: (device < 4) -(8 rows) + -> Sort + Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Seq Scan on compress_hyper_2_32_chunk + Filter: (device < 4) +(6 rows) SELECT time, location, device, temp, humidity, jdata FROM :chunk WHERE device < 4 ORDER BY time, device LIMIT 5; time | location | device | temp | humidity | jdata diff --git a/tsl/test/expected/merge_append_partially_compressed-14.out b/tsl/test/expected/merge_append_partially_compressed-14.out index 9e6623fa073..fc2e4c63e5b 100644 --- a/tsl/test/expected/merge_append_partially_compressed-14.out +++ b/tsl/test/expected/merge_append_partially_compressed-14.out @@ -182,8 +182,8 @@ generate_series(1,3) device; (32 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time" 
DESC @@ -194,8 +194,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_3_chunk."time" DESC Sort Method: top-N heapsort @@ -207,8 +207,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time" DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -219,8 +219,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_1_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_1_chunk."time" DESC -> Seq Scan on _hyper_1_1_chunk (never executed) @@ -228,8 +228,8 @@ generate_series(1,3) device; (41 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time, device DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time", ht_metrics_compressed.device DESC @@ -240,8 +240,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=81 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC Sort Method: top-N heapsort @@ -253,8 +253,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never 
executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -265,8 +265,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Seq Scan on _hyper_1_3_chunk (never executed) @@ -490,6 +490,58 @@ SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time, device DESC Filter: (device = ANY ('{1,2,3}'::integer[])) (39 rows) +-- Test direct ordered select from a single partially compressed chunk +select * from show_chunks('ht_metrics_compressed') chunk order by chunk limit 1 \gset +:PREFIX +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + time | device | value +------------------------------+--------+------- + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 06:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 09:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 12:00:00 2020 PST | 1 | 0.1 +(5 rows) + +:PREFIX +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=4 loops=1) + -> Index Scan Backward using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + time | device | value +------------------------------+--------+------- + Wed Jan 08 12:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 09:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 06:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 +(5 rows) + CREATE TABLE test1 ( time timestamptz NOT NULL, x1 integer, diff --git 
a/tsl/test/expected/merge_append_partially_compressed-15.out b/tsl/test/expected/merge_append_partially_compressed-15.out index 86fc416f7a8..0806da68bd1 100644 --- a/tsl/test/expected/merge_append_partially_compressed-15.out +++ b/tsl/test/expected/merge_append_partially_compressed-15.out @@ -188,8 +188,8 @@ generate_series(1,3) device; (35 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time" DESC @@ -200,8 +200,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_3_chunk."time" DESC Sort Method: top-N heapsort @@ -213,8 +213,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time" DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -225,8 +225,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_1_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_1_chunk."time" DESC -> Seq Scan on _hyper_1_1_chunk (never executed) @@ -234,8 +234,8 @@ generate_series(1,3) device; (41 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time, device DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time", ht_metrics_compressed.device DESC @@ -246,8 +246,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=81 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index 
Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC Sort Method: top-N heapsort @@ -259,8 +259,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -271,8 +271,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Seq Scan on _hyper_1_3_chunk (never executed) @@ -496,6 +496,58 @@ SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time, device DESC Filter: (device = ANY ('{1,2,3}'::integer[])) (39 rows) +-- Test direct ordered select from a single partially compressed chunk +select * from show_chunks('ht_metrics_compressed') chunk order by chunk limit 1 \gset +:PREFIX +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + time | device | value +------------------------------+--------+------- + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 06:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 09:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 12:00:00 2020 PST | 1 | 0.1 +(5 rows) + +:PREFIX +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" 
DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=4 loops=1) + -> Index Scan Backward using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + time | device | value +------------------------------+--------+------- + Wed Jan 08 12:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 09:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 06:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 +(5 rows) + CREATE TABLE test1 ( time timestamptz NOT NULL, x1 integer, diff --git a/tsl/test/expected/merge_append_partially_compressed-16.out b/tsl/test/expected/merge_append_partially_compressed-16.out index 86fc416f7a8..0806da68bd1 100644 --- a/tsl/test/expected/merge_append_partially_compressed-16.out +++ b/tsl/test/expected/merge_append_partially_compressed-16.out @@ -188,8 +188,8 @@ generate_series(1,3) device; (35 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time" DESC @@ -200,8 +200,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_3_chunk."time" DESC Sort Method: top-N heapsort @@ -213,8 +213,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time" DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -225,8 +225,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_1_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_1_chunk."time" DESC -> Seq Scan on _hyper_1_1_chunk 
(never executed) @@ -234,8 +234,8 @@ generate_series(1,3) device; (41 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time, device DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time", ht_metrics_compressed.device DESC @@ -246,8 +246,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=81 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC Sort Method: top-N heapsort @@ -259,8 +259,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -271,8 +271,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Seq Scan on _hyper_1_3_chunk (never executed) @@ -496,6 +496,58 @@ SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time, device DESC Filter: (device = ANY ('{1,2,3}'::integer[])) (39 rows) +-- Test direct ordered select from a single partially compressed chunk +select * from show_chunks('ht_metrics_compressed') chunk order by chunk limit 1 \gset +:PREFIX +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: 
_hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + time | device | value +------------------------------+--------+------- + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 06:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 09:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 12:00:00 2020 PST | 1 | 0.1 +(5 rows) + +:PREFIX +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=4 loops=1) + -> Index Scan Backward using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + time | device | value +------------------------------+--------+------- + Wed Jan 08 12:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 09:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 06:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 +(5 rows) + CREATE TABLE test1 ( time timestamptz NOT NULL, x1 integer, diff --git a/tsl/test/expected/merge_append_partially_compressed-17.out b/tsl/test/expected/merge_append_partially_compressed-17.out index 86fc416f7a8..0806da68bd1 100644 --- a/tsl/test/expected/merge_append_partially_compressed-17.out +++ b/tsl/test/expected/merge_append_partially_compressed-17.out @@ -188,8 +188,8 @@ generate_series(1,3) device; (35 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time" DESC @@ -200,8 +200,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=30 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_3_chunk."time" DESC Sort Method: top-N heapsort @@ -213,8 +213,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk 
(never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time" DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -225,8 +225,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_1_chunk."time" DESC -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_1_chunk."time" DESC -> Seq Scan on _hyper_1_1_chunk (never executed) @@ -234,8 +234,8 @@ generate_series(1,3) device; (41 rows) :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY time, device DESC LIMIT 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------- Limit (actual rows=1 loops=1) -> Custom Scan (ChunkAppend) on ht_metrics_compressed (actual rows=1 loops=1) Order: ht_metrics_compressed."time", ht_metrics_compressed.device DESC @@ -246,8 +246,8 @@ generate_series(1,3) device; Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=81 loops=1) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=3 loops=1) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_4_chunk (actual rows=3 loops=1) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (actual rows=1 loops=1) Sort Key: _hyper_1_1_chunk."time", _hyper_1_1_chunk.device DESC Sort Method: top-N heapsort @@ -259,8 +259,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_5_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_5_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_5_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device DESC -> Seq Scan on _hyper_1_2_chunk (never executed) @@ -271,8 +271,8 @@ generate_series(1,3) device; Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) Filter: (device = ANY ('{1,2,3}'::integer[])) - -> Index Scan using compress_hyper_2_6_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_6_chunk (never executed) - Index Cond: (device = ANY ('{1,2,3}'::integer[])) + -> Seq Scan on compress_hyper_2_6_chunk (never executed) + Filter: (device = ANY ('{1,2,3}'::integer[])) -> Sort (never executed) Sort Key: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device DESC -> Seq Scan on _hyper_1_3_chunk (never executed) @@ -496,6 +496,58 @@ SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER 
BY time, device DESC Filter: (device = ANY ('{1,2,3}'::integer[])) (39 rows) +-- Test direct ordered select from a single partially compressed chunk +select * from show_chunks('ht_metrics_compressed') chunk order by chunk limit 1 \gset +:PREFIX +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3 loops=1) + -> Index Scan using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: _hyper_1_1_chunk.device, _hyper_1_1_chunk."time" + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + time | device | value +------------------------------+--------+------- + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 00:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 06:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 09:00:00 2020 PST | 1 | 0.1 + Thu Jan 02 12:00:00 2020 PST | 1 | 0.1 +(5 rows) + +:PREFIX +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit (actual rows=5 loops=1) + -> Merge Append (actual rows=5 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=4 loops=1) + -> Index Scan Backward using compress_hyper_2_4_chunk_device__ts_meta_min_1__ts_meta_max_idx on compress_hyper_2_4_chunk (actual rows=1 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.device DESC, _hyper_1_1_chunk."time" DESC + Sort Method: top-N heapsort + -> Seq Scan on _hyper_1_1_chunk (actual rows=54 loops=1) +(9 rows) + +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + time | device | value +------------------------------+--------+------- + Wed Jan 08 12:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 09:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 06:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 + Wed Jan 08 00:00:00 2020 PST | 3 | 0.3 +(5 rows) + CREATE TABLE test1 ( time timestamptz NOT NULL, x1 integer, diff --git a/tsl/test/expected/recompress_chunk_segmentwise.out b/tsl/test/expected/recompress_chunk_segmentwise.out index db795bbe76f..c60948eb1b9 100644 --- a/tsl/test/expected/recompress_chunk_segmentwise.out +++ b/tsl/test/expected/recompress_chunk_segmentwise.out @@ -130,9 +130,9 @@ select * from :chunk_to_compress_2 ORDER BY a, c, time DESC; time | a | b | c -------------------------------------+---+---+--- Sun Jan 01 11:56:20.048355 2023 PST | 2 | | 2 + Sun Jan 01 09:56:20.048355 2023 PST | 2 | | 2 Sun Jan 01 11:57:20.048355 2023 PST | 3 | | 3 Sun Jan 01 11:56:20.048355 2023 PST | 3 | | 3 - Sun Jan 01 09:56:20.048355 2023 PST | 2 | | 2 (4 rows) SELECT compressed_chunk_schema || '.' 
|| compressed_chunk_name as compressed_chunk_name_2 diff --git a/tsl/test/expected/transparent_decompression-14.out b/tsl/test/expected/transparent_decompression-14.out index 6f7935e9e32..b26fd211c4f 100644 --- a/tsl/test/expected/transparent_decompression-14.out +++ b/tsl/test/expected/transparent_decompression-14.out @@ -1508,28 +1508,23 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - -> Sort (actual rows=2235 loops=1) - Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 - Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=2235 loops=1) Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) Rows Removed by Filter: 1125 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) - Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 - Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=5 loops=1) - Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 - Sort Key: compress_hyper_5_16_chunk.device_id DESC, compress_hyper_5_16_chunk.device_id_peer DESC, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2 DESC, compress_hyper_5_16_chunk._ts_meta_max_2 DESC, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3 - Sort Method: 
quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(21 rows) +(16 rows) -- should not produce ordered path :PREFIX_VERBOSE @@ -5366,43 +5361,41 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 Sort Key: _hyper_2_7_chunk.device_id DESC, _hyper_2_7_chunk.device_id_peer DESC, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" - -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) - Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 - Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) - Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 - Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) - Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 - Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time 
zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) - Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 - Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=1 loops=1) - Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 - Sort Key: compress_hyper_6_20_chunk.device_id DESC, compress_hyper_6_20_chunk.device_id_peer DESC, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2 DESC, compress_hyper_6_20_chunk._ts_meta_max_2 DESC, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3 - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 675 + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, 
compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) - Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 - Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=3 loops=1) - Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 - Sort Key: compress_hyper_6_21_chunk.device_id DESC, compress_hyper_6_21_chunk.device_id_peer DESC, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2 DESC, compress_hyper_6_21_chunk._ts_meta_max_2 DESC, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) - Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 - Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(36 rows) + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Filter: (_hyper_2_12_chunk."time" > 'Sat Jan 08 
00:00:00 2000 PST'::timestamp with time zone) +(34 rows) -- should not produce ordered path :PREFIX_VERBOSE diff --git a/tsl/test/expected/transparent_decompression-15.out b/tsl/test/expected/transparent_decompression-15.out index 5677fa7d695..bbc844d30d7 100644 --- a/tsl/test/expected/transparent_decompression-15.out +++ b/tsl/test/expected/transparent_decompression-15.out @@ -1508,28 +1508,23 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - -> Sort (actual rows=2235 loops=1) - Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 - Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=2235 loops=1) Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) Rows Removed by Filter: 1125 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) - Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 - Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=5 loops=1) - Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 - Sort Key: compress_hyper_5_16_chunk.device_id DESC, compress_hyper_5_16_chunk.device_id_peer DESC, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2 DESC, compress_hyper_5_16_chunk._ts_meta_max_2 DESC, compress_hyper_5_16_chunk._ts_meta_min_3, 
compress_hyper_5_16_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(21 rows) +(16 rows) -- should not produce ordered path :PREFIX_VERBOSE @@ -5366,43 +5361,41 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 Sort Key: _hyper_2_7_chunk.device_id DESC, _hyper_2_7_chunk.device_id_peer DESC, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" - -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) - Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 - Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) - Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 - Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) - Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 - Index Cond: (_hyper_2_9_chunk."time" > 
'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) - Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 - Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=1 loops=1) - Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 - Sort Key: compress_hyper_6_20_chunk.device_id DESC, compress_hyper_6_20_chunk.device_id_peer DESC, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2 DESC, compress_hyper_6_20_chunk._ts_meta_max_2 DESC, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3 - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 675 + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, 
compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) - Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 - Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=3 loops=1) - Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 - Sort Key: compress_hyper_6_21_chunk.device_id DESC, compress_hyper_6_21_chunk.device_id_peer DESC, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2 DESC, compress_hyper_6_21_chunk._ts_meta_max_2 DESC, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) - Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 - Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(36 rows) + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Filter: 
(_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(34 rows) -- should not produce ordered path :PREFIX_VERBOSE diff --git a/tsl/test/expected/transparent_decompression-16.out b/tsl/test/expected/transparent_decompression-16.out index c379010a68e..5b870a639f9 100644 --- a/tsl/test/expected/transparent_decompression-16.out +++ b/tsl/test/expected/transparent_decompression-16.out @@ -1508,28 +1508,23 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - -> Sort (actual rows=2235 loops=1) - Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 - Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=2235 loops=1) Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) Rows Removed by Filter: 1125 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) - Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 - Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=5 loops=1) - Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 - Sort Key: compress_hyper_5_16_chunk.device_id DESC, compress_hyper_5_16_chunk.device_id_peer DESC, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2 DESC, compress_hyper_5_16_chunk._ts_meta_max_2 DESC, 
compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(21 rows) +(16 rows) -- should not produce ordered path :PREFIX_VERBOSE @@ -5366,43 +5361,41 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 Sort Key: _hyper_2_7_chunk.device_id DESC, _hyper_2_7_chunk.device_id_peer DESC, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" - -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) - Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 - Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) - Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 - Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) - Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 
- Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) - Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 - Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=1 loops=1) - Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 - Sort Key: compress_hyper_6_20_chunk.device_id DESC, compress_hyper_6_20_chunk.device_id_peer DESC, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2 DESC, compress_hyper_6_20_chunk._ts_meta_max_2 DESC, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3 - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 675 + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, 
compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) - Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 - Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=3 loops=1) - Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 - Sort Key: compress_hyper_6_21_chunk.device_id DESC, compress_hyper_6_21_chunk.device_id_peer DESC, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2 DESC, compress_hyper_6_21_chunk._ts_meta_max_2 DESC, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) - Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 - Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(36 rows) + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, 
_hyper_2_12_chunk.v3 + Filter: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(34 rows) -- should not produce ordered path :PREFIX_VERBOSE diff --git a/tsl/test/expected/transparent_decompression-17.out b/tsl/test/expected/transparent_decompression-17.out index c379010a68e..5b870a639f9 100644 --- a/tsl/test/expected/transparent_decompression-17.out +++ b/tsl/test/expected/transparent_decompression-17.out @@ -1508,28 +1508,23 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - -> Sort (actual rows=2235 loops=1) - Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 - Sort Key: _hyper_1_2_chunk.device_id DESC, _hyper_1_2_chunk.device_id_peer DESC, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1 DESC, _hyper_1_2_chunk."time" - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=2235 loops=1) Output: _hyper_1_2_chunk."time", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.device_id_peer, _hyper_1_2_chunk.v0, _hyper_1_2_chunk.v1, _hyper_1_2_chunk.v2, _hyper_1_2_chunk.v3 Filter: (_hyper_1_2_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) Rows Removed by Filter: 1125 - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) - Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 - Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=5 loops=1) - Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 - Sort Key: compress_hyper_5_16_chunk.device_id DESC, compress_hyper_5_16_chunk.device_id_peer DESC, compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk._ts_meta_min_2 DESC, 
compress_hyper_5_16_chunk._ts_meta_max_2 DESC, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=1680 loops=1) + Output: _hyper_1_3_chunk."time", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.device_id_peer, _hyper_1_3_chunk.v0, _hyper_1_3_chunk.v1, _hyper_1_3_chunk.v2, _hyper_1_3_chunk.v3 + Vectorized Filter: (_hyper_1_3_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=5 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 Filter: (compress_hyper_5_16_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(21 rows) +(16 rows) -- should not produce ordered path :PREFIX_VERBOSE @@ -5366,43 +5361,41 @@ ORDER BY device_id DESC, time; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (actual rows=3915 loops=1) + Sort (actual rows=3915 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 Sort Key: _hyper_2_7_chunk.device_id DESC, _hyper_2_7_chunk.device_id_peer DESC, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1 DESC, _hyper_2_7_chunk."time" - -> Index Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) - Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 - Index Cond: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) - Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 - Index Cond: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) - Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, 
_hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 - Index Cond: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) - Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 - Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=1 loops=1) - Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 - Sort Key: compress_hyper_6_20_chunk.device_id DESC, compress_hyper_6_20_chunk.device_id_peer DESC, compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk._ts_meta_min_2 DESC, compress_hyper_6_20_chunk._ts_meta_max_2 DESC, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3 - Sort Method: quicksort + Sort Method: quicksort + -> Append (actual rows=3915 loops=1) + -> Seq Scan on _timescaledb_internal._hyper_2_7_chunk (actual rows=447 loops=1) + Output: _hyper_2_7_chunk."time", _hyper_2_7_chunk.device_id, _hyper_2_7_chunk.device_id_peer, _hyper_2_7_chunk.v0, _hyper_2_7_chunk.v1, _hyper_2_7_chunk.v2, _hyper_2_7_chunk.v3 + Filter: (_hyper_2_7_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Seq Scan on _timescaledb_internal._hyper_2_8_chunk (actual rows=1341 loops=1) + Output: _hyper_2_8_chunk."time", _hyper_2_8_chunk.device_id, _hyper_2_8_chunk.device_id_peer, _hyper_2_8_chunk.v0, _hyper_2_8_chunk.v1, _hyper_2_8_chunk.v2, _hyper_2_8_chunk.v3 + Filter: (_hyper_2_8_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 675 + -> Seq Scan on _timescaledb_internal._hyper_2_9_chunk (actual rows=447 loops=1) + Output: _hyper_2_9_chunk."time", _hyper_2_9_chunk.device_id, _hyper_2_9_chunk.device_id_peer, _hyper_2_9_chunk.v0, _hyper_2_9_chunk.v1, _hyper_2_9_chunk.v2, _hyper_2_9_chunk.v3 + Filter: (_hyper_2_9_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Rows Removed by Filter: 225 + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=336 loops=1) + Output: _hyper_2_10_chunk."time", _hyper_2_10_chunk.device_id, _hyper_2_10_chunk.device_id_peer, _hyper_2_10_chunk.v0, _hyper_2_10_chunk.v1, _hyper_2_10_chunk.v2, _hyper_2_10_chunk.v3 + Vectorized Filter: (_hyper_2_10_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", 
compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 Filter: (compress_hyper_6_20_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) - Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 - Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - Bulk Decompression: true - -> Sort (actual rows=3 loops=1) - Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 - Sort Key: compress_hyper_6_21_chunk.device_id DESC, compress_hyper_6_21_chunk.device_id_peer DESC, compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk._ts_meta_min_2 DESC, compress_hyper_6_21_chunk._ts_meta_max_2 DESC, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3 - Sort Method: quicksort + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_11_chunk (actual rows=1008 loops=1) + Output: _hyper_2_11_chunk."time", _hyper_2_11_chunk.device_id, _hyper_2_11_chunk.device_id_peer, _hyper_2_11_chunk.v0, _hyper_2_11_chunk.v1, _hyper_2_11_chunk.v2, _hyper_2_11_chunk.v3 + Vectorized Filter: (_hyper_2_11_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) + Bulk Decompression: true -> Seq Scan on _timescaledb_internal.compress_hyper_6_21_chunk (actual rows=3 loops=1) Output: compress_hyper_6_21_chunk._ts_meta_count, compress_hyper_6_21_chunk.device_id, compress_hyper_6_21_chunk.device_id_peer, compress_hyper_6_21_chunk._ts_meta_min_3, compress_hyper_6_21_chunk._ts_meta_max_3, compress_hyper_6_21_chunk."time", compress_hyper_6_21_chunk._ts_meta_min_1, compress_hyper_6_21_chunk._ts_meta_max_1, compress_hyper_6_21_chunk.v0, compress_hyper_6_21_chunk._ts_meta_min_2, compress_hyper_6_21_chunk._ts_meta_max_2, compress_hyper_6_21_chunk.v1, compress_hyper_6_21_chunk.v2, compress_hyper_6_21_chunk.v3 Filter: (compress_hyper_6_21_chunk._ts_meta_max_3 > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) - -> Index Scan using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) - Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, _hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 - Index Cond: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) -(36 rows) + -> Seq Scan on _timescaledb_internal._hyper_2_12_chunk (actual rows=336 loops=1) + Output: _hyper_2_12_chunk."time", _hyper_2_12_chunk.device_id, 
_hyper_2_12_chunk.device_id_peer, _hyper_2_12_chunk.v0, _hyper_2_12_chunk.v1, _hyper_2_12_chunk.v2, _hyper_2_12_chunk.v3 + Filter: (_hyper_2_12_chunk."time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone) +(34 rows) -- should not produce ordered path :PREFIX_VERBOSE diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 930d105a2ed..9a7456f2f6b 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -185,21 +185,17 @@ select sum(c) from dvagg having sum(c) > 0; -- Some negative cases. set timescaledb.debug_require_vector_agg to 'forbid'; explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------- MixedAggregate Hash Key: _hyper_1_1_chunk.a Group Key: () -> Append -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - -> Sort - Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1, compress_hyper_2_2_chunk._ts_meta_max_1 - -> Seq Scan on compress_hyper_2_2_chunk + -> Seq Scan on compress_hyper_2_2_chunk -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk - -> Sort - Sort Key: compress_hyper_2_4_chunk._ts_meta_min_1, compress_hyper_2_4_chunk._ts_meta_max_1 - -> Seq Scan on compress_hyper_2_4_chunk -(12 rows) + -> Seq Scan on compress_hyper_2_4_chunk +(8 rows) -- As a reference, the result on decompressed table. select decompress_chunk(show_chunks('dvagg')); diff --git a/tsl/test/expected/vector_agg_memory.out b/tsl/test/expected/vector_agg_memory.out index 1600fbba164..86faa1f54ad 100644 --- a/tsl/test/expected/vector_agg_memory.out +++ b/tsl/test/expected/vector_agg_memory.out @@ -72,34 +72,26 @@ select * from log where ( truncate log; set max_parallel_workers_per_gather = 0; set timescaledb.debug_require_vector_agg = 'require'; --- Despite the tweaks above, we are unable to force the HashAggregation, because --- the unsorted DecompressChunk paths for aggregation are not created properly --- (see issue #6836). Limit the memory consumed by tuplesort. -set work_mem = '64kB'; +-- We should reliably see HashAggregate here because of the tweaks we made above. 
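[Editor's note, illustrative only, not part of the patch: the replacement comment above leans on the core change of this PR, namely that DecompressChunk now also produces unsorted paths for aggregation, so the planner can choose HashAggregate without the old work_mem = '64kB' workaround. A minimal standalone sanity check against the mvagg table used by this test could look like the sketch below; the exact plan shape still depends on statistics and settings.]

    set timescaledb.debug_require_vector_agg = 'require';
    explain (costs off)
    select count(*) from mvagg group by s1;
    -- expected: HashAggregate over Append, with no Sort below DecompressChunk
    reset timescaledb.debug_require_vector_agg;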
explain (costs off) select ts_debug_allocated_bytes() bytes, count(*) a, count(t) b, sum(t) c, avg(t) d, min(t) e, max(t) f from mvagg where t >= -1 and t < 1000000 group by s1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Finalize HashAggregate Group Key: _hyper_1_1_chunk.s1 - -> Merge Append - Sort Key: _hyper_1_1_chunk.s1 + -> Append -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000)) - -> Sort - Sort Key: compress_hyper_2_3_chunk.s1 - -> Seq Scan on compress_hyper_2_3_chunk - Filter: ((_ts_meta_max_1 >= '-1'::integer) AND (_ts_meta_min_1 < 1000000)) + -> Seq Scan on compress_hyper_2_3_chunk + Filter: ((_ts_meta_max_1 >= '-1'::integer) AND (_ts_meta_min_1 < 1000000)) -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000)) - -> Sort - Sort Key: compress_hyper_2_4_chunk.s1 - -> Index Scan using compress_hyper_2_4_chunk_s0_s1__ts_meta_min_1__ts_meta_max__idx on compress_hyper_2_4_chunk - Index Cond: ((_ts_meta_min_1 < 1000000) AND (_ts_meta_max_1 >= '-1'::integer)) -(18 rows) + -> Index Scan using compress_hyper_2_4_chunk_s0_s1__ts_meta_min_1__ts_meta_max__idx on compress_hyper_2_4_chunk + Index Cond: ((_ts_meta_min_1 < 1000000) AND (_ts_meta_max_1 >= '-1'::integer)) +(13 rows) \set ECHO none reset timescaledb.debug_require_vector_agg; @@ -108,10 +100,8 @@ reset work_mem; select * from log where ( -- For aggregation by segmentby, memory usage should be constant regardless -- of the number of tuples. Still, we have to allow for small variations - -- that can be caused by other reasons. Currently the major increase is - -- caused by tuplesort, because we are unable to force hash aggregation due - -- to unrelated planning bugs. - select regr_slope(bytes, n) > 0.05 from log + -- that can be caused by other reasons. 
+ select regr_slope(bytes, n) > 0.01 from log ); n | bytes | a | b | c | d | e | f ---+-------+---+---+---+---+---+--- diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 01f690b743a..3634701de2f 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -40,6 +40,7 @@ SELECT compress_chunk(ch) FROM show_chunks('testtable') ch LIMIT 3; _timescaledb_internal._hyper_1_3_chunk (3 rows) +VACUUM ANALYZE testtable; -- Vectorized aggregation possible SELECT sum(segment_by_value) FROM testtable; sum @@ -49,635 +50,608 @@ SELECT sum(segment_by_value) FROM testtable; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Parallel Seq Scan on 
_timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value -(55 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk 
+ Output: _hyper_1_6_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value +(52 rows) -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - Filter: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - Filter: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, 
compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - Filter: (compress_hyper_2_13_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - Filter: (_hyper_1_4_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - Filter: (_hyper_1_5_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - Filter: (_hyper_1_6_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - Filter: (_hyper_1_7_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - Filter: (_hyper_1_8_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - Filter: (_hyper_1_9_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value - Filter: (_hyper_1_10_chunk.segment_by_value > 0) -(65 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + Filter: (compress_hyper_2_11_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + Filter: (compress_hyper_2_12_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Seq Scan on 
_timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + Filter: (compress_hyper_2_13_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + Filter: (_hyper_1_4_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + Filter: (_hyper_1_5_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + Filter: (_hyper_1_6_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + Filter: (_hyper_1_7_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + Filter: (_hyper_1_8_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + Filter: (_hyper_1_9_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + Filter: (_hyper_1_10_chunk.segment_by_value > 0) +(62 rows) -- Vectorization with filter on compressed columns :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0 AND int_value > 0; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, 
compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - Filter: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - Filter: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - Filter: (compress_hyper_2_13_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - Filter: ((_hyper_1_4_chunk.segment_by_value > 0) AND (_hyper_1_4_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - Filter: ((_hyper_1_5_chunk.segment_by_value > 0) AND (_hyper_1_5_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - Filter: ((_hyper_1_6_chunk.segment_by_value > 0) AND (_hyper_1_6_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - Filter: ((_hyper_1_7_chunk.segment_by_value > 0) AND (_hyper_1_7_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - Filter: ((_hyper_1_8_chunk.segment_by_value > 0) AND (_hyper_1_8_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - Filter: ((_hyper_1_9_chunk.segment_by_value > 0) AND (_hyper_1_9_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value - Filter: 
((_hyper_1_10_chunk.segment_by_value > 0) AND (_hyper_1_10_chunk.int_value > 0)) -(68 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + Filter: (compress_hyper_2_11_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + Filter: (compress_hyper_2_12_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + Filter: (compress_hyper_2_13_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + Filter: ((_hyper_1_4_chunk.segment_by_value > 0) AND (_hyper_1_4_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + Filter: ((_hyper_1_5_chunk.segment_by_value > 0) AND (_hyper_1_5_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + Filter: ((_hyper_1_6_chunk.segment_by_value > 0) AND (_hyper_1_6_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + Filter: ((_hyper_1_7_chunk.segment_by_value > 0) AND (_hyper_1_7_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + Filter: 
((_hyper_1_8_chunk.segment_by_value > 0) AND (_hyper_1_8_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + Filter: ((_hyper_1_9_chunk.segment_by_value > 0) AND (_hyper_1_9_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + Filter: ((_hyper_1_10_chunk.segment_by_value > 0) AND (_hyper_1_10_chunk.int_value > 0)) +(65 rows) :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE int_value > 0; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value 
- -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - Filter: (_hyper_1_4_chunk.int_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - Filter: (_hyper_1_5_chunk.int_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - Filter: (_hyper_1_6_chunk.int_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - Filter: (_hyper_1_7_chunk.int_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - Filter: (_hyper_1_8_chunk.int_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - Filter: (_hyper_1_9_chunk.int_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value - Filter: (_hyper_1_10_chunk.int_value > 0) -(65 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", 
compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + Filter: (_hyper_1_4_chunk.int_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + Filter: (_hyper_1_5_chunk.int_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + Filter: (_hyper_1_6_chunk.int_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + Filter: (_hyper_1_7_chunk.int_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + Filter: (_hyper_1_8_chunk.int_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + Filter: (_hyper_1_9_chunk.int_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + Filter: (_hyper_1_10_chunk.int_value > 0) +(62 rows) :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE float_value > 0; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - Vectorized Filter: (_hyper_1_1_chunk.float_value > '0'::double precision) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - Vectorized Filter: 
(_hyper_1_2_chunk.float_value > '0'::double precision) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - Vectorized Filter: (_hyper_1_3_chunk.float_value > '0'::double precision) - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - Filter: (_hyper_1_4_chunk.float_value > '0'::double precision) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - Filter: (_hyper_1_5_chunk.float_value > '0'::double precision) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - Filter: (_hyper_1_6_chunk.float_value > '0'::double precision) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - Filter: (_hyper_1_7_chunk.float_value > '0'::double precision) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - Filter: (_hyper_1_8_chunk.float_value > '0'::double precision) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - Filter: (_hyper_1_9_chunk.float_value > '0'::double precision) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value - Filter: (_hyper_1_10_chunk.float_value > '0'::double precision) -(65 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + Vectorized Filter: (_hyper_1_1_chunk.float_value > '0'::double precision) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", 
compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + Vectorized Filter: (_hyper_1_2_chunk.float_value > '0'::double precision) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + Vectorized Filter: (_hyper_1_3_chunk.float_value > '0'::double precision) + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + Filter: (_hyper_1_4_chunk.float_value > '0'::double precision) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + Filter: (_hyper_1_5_chunk.float_value > '0'::double precision) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + Filter: (_hyper_1_6_chunk.float_value > '0'::double precision) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + Filter: (_hyper_1_7_chunk.float_value > '0'::double precision) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + Filter: (_hyper_1_8_chunk.float_value > '0'::double precision) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + Filter: (_hyper_1_9_chunk.float_value > '0'::double precision) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + Filter: (_hyper_1_10_chunk.float_value > '0'::double precision) +(62 rows) -- Vectorization not possible due grouping :EXPLAIN SELECT sum(segment_by_value) FROM testtable GROUP BY float_value; - QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize HashAggregate Output: sum(_hyper_1_1_chunk.segment_by_value), _hyper_1_1_chunk.float_value Group Key: _hyper_1_1_chunk.float_value - -> Gather - Output: _hyper_1_1_chunk.float_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Partial HashAggregate - Output: _hyper_1_1_chunk.float_value, PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - Group Key: _hyper_1_1_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.float_value, _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_2_chunk.float_value, PARTIAL sum(_hyper_1_2_chunk.segment_by_value) - Group Key: _hyper_1_2_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.float_value, _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_3_chunk.float_value, PARTIAL sum(_hyper_1_3_chunk.segment_by_value) - Group Key: _hyper_1_3_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.float_value, _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_4_chunk.float_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - Group Key: _hyper_1_4_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.float_value, _hyper_1_4_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_5_chunk.float_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - Group Key: _hyper_1_5_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.float_value, _hyper_1_5_chunk.segment_by_value - -> Partial 
HashAggregate - Output: _hyper_1_6_chunk.float_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - Group Key: _hyper_1_6_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.float_value, _hyper_1_6_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_7_chunk.float_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - Group Key: _hyper_1_7_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.float_value, _hyper_1_7_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_8_chunk.float_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - Group Key: _hyper_1_8_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.float_value, _hyper_1_8_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_9_chunk.float_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - Group Key: _hyper_1_9_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.float_value, _hyper_1_9_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_10_chunk.float_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - Group Key: _hyper_1_10_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.float_value, _hyper_1_10_chunk.segment_by_value -(63 rows) + -> Append + -> Partial HashAggregate + Output: _hyper_1_1_chunk.float_value, PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + Group Key: _hyper_1_1_chunk.float_value + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.float_value, _hyper_1_1_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_2_chunk.float_value, PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + Group Key: _hyper_1_2_chunk.float_value + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.float_value, _hyper_1_2_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_3_chunk.float_value, PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + Group Key: _hyper_1_3_chunk.float_value + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.float_value, _hyper_1_3_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_4_chunk.float_value, PARTIAL 
sum(_hyper_1_4_chunk.segment_by_value) + Group Key: _hyper_1_4_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.float_value, _hyper_1_4_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_5_chunk.float_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + Group Key: _hyper_1_5_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.float_value, _hyper_1_5_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_6_chunk.float_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + Group Key: _hyper_1_6_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.float_value, _hyper_1_6_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_7_chunk.float_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + Group Key: _hyper_1_7_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.float_value, _hyper_1_7_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_8_chunk.float_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + Group Key: _hyper_1_8_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.float_value, _hyper_1_8_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_9_chunk.float_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + Group Key: _hyper_1_9_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.float_value, _hyper_1_9_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_10_chunk.float_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + Group Key: _hyper_1_10_chunk.float_value + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.float_value, _hyper_1_10_chunk.segment_by_value +(60 rows) :EXPLAIN SELECT sum(segment_by_value) FROM testtable GROUP BY int_value; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize HashAggregate Output: sum(_hyper_1_1_chunk.segment_by_value), _hyper_1_1_chunk.int_value Group Key: _hyper_1_1_chunk.int_value - -> Gather - Output: _hyper_1_1_chunk.int_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Partial HashAggregate - Output: _hyper_1_1_chunk.int_value, PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - Group Key: _hyper_1_1_chunk.int_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.int_value, _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", 
compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_2_chunk.int_value, PARTIAL sum(_hyper_1_2_chunk.segment_by_value) - Group Key: _hyper_1_2_chunk.int_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.int_value, _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_3_chunk.int_value, PARTIAL sum(_hyper_1_3_chunk.segment_by_value) - Group Key: _hyper_1_3_chunk.int_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.int_value, _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_4_chunk.int_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - Group Key: _hyper_1_4_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.int_value, _hyper_1_4_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_5_chunk.int_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - Group Key: _hyper_1_5_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.int_value, _hyper_1_5_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_6_chunk.int_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - Group Key: _hyper_1_6_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.int_value, _hyper_1_6_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_7_chunk.int_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - Group Key: _hyper_1_7_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.int_value, _hyper_1_7_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_8_chunk.int_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - Group Key: _hyper_1_8_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.int_value, _hyper_1_8_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_9_chunk.int_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - Group Key: _hyper_1_9_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.int_value, _hyper_1_9_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_10_chunk.int_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - Group Key: _hyper_1_10_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.int_value, _hyper_1_10_chunk.segment_by_value -(63 rows) + -> Append + -> Partial HashAggregate + Output: _hyper_1_1_chunk.int_value, PARTIAL 
sum(_hyper_1_1_chunk.segment_by_value) + Group Key: _hyper_1_1_chunk.int_value + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value, _hyper_1_1_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_2_chunk.int_value, PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + Group Key: _hyper_1_2_chunk.int_value + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.int_value, _hyper_1_2_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_3_chunk.int_value, PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + Group Key: _hyper_1_3_chunk.int_value + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.int_value, _hyper_1_3_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_4_chunk.int_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + Group Key: _hyper_1_4_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.int_value, _hyper_1_4_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_5_chunk.int_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + Group Key: _hyper_1_5_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.int_value, _hyper_1_5_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_6_chunk.int_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + Group Key: _hyper_1_6_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.int_value, _hyper_1_6_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_7_chunk.int_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + Group Key: _hyper_1_7_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.int_value, _hyper_1_7_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_8_chunk.int_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + Group Key: _hyper_1_8_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.int_value, _hyper_1_8_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_9_chunk.int_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + Group Key: _hyper_1_9_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.int_value, 
_hyper_1_9_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_10_chunk.int_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + Group Key: _hyper_1_10_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value, _hyper_1_10_chunk.segment_by_value +(60 rows) -- Vectorization possible with grouping by a segmentby column. :EXPLAIN SELECT sum(int_value) FROM testtable GROUP BY segment_by_value; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize HashAggregate Output: sum(_hyper_1_1_chunk.int_value), _hyper_1_1_chunk.segment_by_value Group Key: _hyper_1_1_chunk.segment_by_value - -> Gather - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Workers Planned: 2 - -> Parallel Append + -> Append + -> Custom Scan (VectorAgg) + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.int_value)) + Grouping Policy: per compressed batch + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value, _hyper_1_1_chunk.int_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.int_value)) + Grouping Policy: per compressed batch + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value, _hyper_1_2_chunk.int_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.int_value)) + Grouping Policy: per compressed batch + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value, _hyper_1_3_chunk.int_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.int_value) + Group Key: 
_hyper_1_4_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value, _hyper_1_4_chunk.int_value + -> Partial HashAggregate + Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.int_value) + Group Key: _hyper_1_5_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value, _hyper_1_5_chunk.int_value + -> Partial HashAggregate + Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.int_value) + Group Key: _hyper_1_6_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value, _hyper_1_6_chunk.int_value + -> Partial HashAggregate + Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.int_value) + Group Key: _hyper_1_7_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value, _hyper_1_7_chunk.int_value + -> Partial HashAggregate + Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.int_value) + Group Key: _hyper_1_8_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value, _hyper_1_8_chunk.int_value + -> Partial HashAggregate + Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.int_value) + Group Key: _hyper_1_9_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value, _hyper_1_9_chunk.int_value + -> Partial HashAggregate + Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.int_value) + Group Key: _hyper_1_10_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value, _hyper_1_10_chunk.int_value +(60 rows) + +:EXPLAIN +SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(_hyper_1_1_chunk.segment_by_value)), _hyper_1_1_chunk.segment_by_value + Sort Key: (sum(_hyper_1_1_chunk.segment_by_value)), _hyper_1_1_chunk.segment_by_value + -> Finalize HashAggregate + Output: sum(_hyper_1_1_chunk.segment_by_value), _hyper_1_1_chunk.segment_by_value + Group Key: _hyper_1_1_chunk.segment_by_value + -> Append -> Custom Scan (VectorAgg) - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.int_value)) + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Grouping Policy: per compressed batch -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value, _hyper_1_1_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, 
compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.int_value)) + Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) Grouping Policy: per compressed batch -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value, _hyper_1_2_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.int_value)) + Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) Grouping Policy: per compressed batch -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value, _hyper_1_3_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Partial HashAggregate - Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.int_value) + Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) Group Key: _hyper_1_4_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value, _hyper_1_4_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value -> Partial HashAggregate - Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.int_value) + Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) Group Key: _hyper_1_5_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value, _hyper_1_5_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value -> Partial HashAggregate - Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.int_value) + Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) Group Key: _hyper_1_6_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value, _hyper_1_6_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value -> Partial HashAggregate - Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.int_value) + Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) Group Key: _hyper_1_7_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: 
_hyper_1_7_chunk.segment_by_value, _hyper_1_7_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value -> Partial HashAggregate - Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.int_value) + Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) Group Key: _hyper_1_8_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value, _hyper_1_8_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value -> Partial HashAggregate - Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.int_value) + Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) Group Key: _hyper_1_9_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value, _hyper_1_9_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value -> Partial HashAggregate - Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.int_value) + Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) Group Key: _hyper_1_10_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value, _hyper_1_10_chunk.int_value + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value (63 rows) -:EXPLAIN -SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Sort - Output: (sum(_hyper_1_1_chunk.segment_by_value)), _hyper_1_1_chunk.segment_by_value - Sort Key: (sum(_hyper_1_1_chunk.segment_by_value)), _hyper_1_1_chunk.segment_by_value - -> Finalize HashAggregate - Output: sum(_hyper_1_1_chunk.segment_by_value), _hyper_1_1_chunk.segment_by_value - Group Key: _hyper_1_1_chunk.segment_by_value - -> Gather - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: per compressed batch - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: per compressed batch - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: per compressed batch - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - Group Key: _hyper_1_4_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - Group Key: _hyper_1_5_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - Group Key: _hyper_1_6_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - Group Key: _hyper_1_7_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - Group Key: _hyper_1_8_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - Group Key: _hyper_1_9_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - Group Key: _hyper_1_10_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value -(66 rows) - SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; sum | segment_by_value ------+------------------ @@ -796,75 +770,72 @@ SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_b :EXPLAIN SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; - QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Output: _hyper_1_1_chunk.segment_by_value, (sum(_hyper_1_1_chunk.segment_by_value)) Sort Key: _hyper_1_1_chunk.segment_by_value, (sum(_hyper_1_1_chunk.segment_by_value)) -> Finalize HashAggregate Output: _hyper_1_1_chunk.segment_by_value, sum(_hyper_1_1_chunk.segment_by_value) Group Key: _hyper_1_1_chunk.segment_by_value - -> Gather - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: per compressed batch - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: per compressed batch - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: per compressed batch - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - Group Key: _hyper_1_4_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - Group Key: _hyper_1_5_chunk.segment_by_value - -> Parallel Seq Scan on 
_timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - Group Key: _hyper_1_6_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - Group Key: _hyper_1_7_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - Group Key: _hyper_1_8_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - Group Key: _hyper_1_9_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - Group Key: _hyper_1_10_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value -(66 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: per compressed batch + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: per compressed batch + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: per compressed batch + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + Group Key: 
_hyper_1_4_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + Group Key: _hyper_1_5_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + Group Key: _hyper_1_6_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + Group Key: _hyper_1_7_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + Group Key: _hyper_1_8_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + Group Key: _hyper_1_9_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + Group Key: _hyper_1_10_chunk.segment_by_value + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value +(63 rows) SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; segment_by_value | sum @@ -991,64 +962,61 @@ SELECT sum(int_value) FROM testtable; :EXPLAIN SELECT sum(int_value) FROM testtable; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.int_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) - Grouping Policy: all compressed batches - -> Custom Scan 
(DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.int_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.int_value -(55 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.int_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL 
sum(_hyper_1_3_chunk.int_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.int_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.int_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value +(52 rows) -- Vectorized aggregation possible SELECT sum(float_value) FROM testtable; @@ -1059,64 +1027,61 @@ SELECT sum(float_value) FROM testtable; :EXPLAIN SELECT sum(float_value) FROM testtable; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.float_value) - -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk.float_value)) - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.float_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.float_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - 
Output: _hyper_1_2_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.float_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.float_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.float_value) - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.float_value -(55 rows) + -> Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.float_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.float_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.float_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.float_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL 
sum(_hyper_1_3_chunk.float_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.float_value + -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.float_value) + -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.float_value +(52 rows) --- -- Tests with all chunks compressed @@ -1139,6 +1104,7 @@ NOTICE: chunk "_hyper_1_3_chunk" is already compressed _timescaledb_internal._hyper_1_10_chunk (10 rows) +VACUUM ANALYZE testtable; -- Vectorized aggregation possible SELECT sum(segment_by_value) FROM testtable; sum @@ -2375,81 +2341,81 @@ SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk Output: _hyper_1_41_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk + -> Parallel Index Scan using compress_hyper_2_51_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_51_chunk Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value - Filter: (compress_hyper_2_51_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_51_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_42_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk Output: _hyper_1_42_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk + -> Parallel Index Scan using compress_hyper_2_52_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_52_chunk Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, 
compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value - Filter: (compress_hyper_2_52_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_52_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_43_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk Output: _hyper_1_43_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk + -> Parallel Index Scan using compress_hyper_2_53_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_53_chunk Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value - Filter: (compress_hyper_2_53_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_53_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_44_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk Output: _hyper_1_44_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk + -> Parallel Index Scan using compress_hyper_2_54_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_54_chunk Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value - Filter: (compress_hyper_2_54_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_54_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_45_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk Output: _hyper_1_45_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk + -> Parallel Index Scan using compress_hyper_2_55_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_55_chunk Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value - Filter: (compress_hyper_2_55_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_55_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_46_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk Output: _hyper_1_46_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk + -> Parallel Index Scan using compress_hyper_2_56_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_56_chunk Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value - Filter: 
(compress_hyper_2_56_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_56_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_47_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk Output: _hyper_1_47_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk + -> Parallel Index Scan using compress_hyper_2_57_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_57_chunk Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value - Filter: (compress_hyper_2_57_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_57_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_48_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk Output: _hyper_1_48_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk + -> Parallel Index Scan using compress_hyper_2_58_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_58_chunk Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value - Filter: (compress_hyper_2_58_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_58_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_49_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk Output: _hyper_1_49_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk + -> Parallel Index Scan using compress_hyper_2_59_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_59_chunk Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value - Filter: (compress_hyper_2_59_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_59_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_1_50_chunk.int_value)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk Output: _hyper_1_50_chunk.int_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk + -> Parallel Index Scan using compress_hyper_2_60_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_60_chunk Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value - Filter: (compress_hyper_2_60_chunk.segment_by_value > 5) + Index Cond: (compress_hyper_2_60_chunk.segment_by_value > 5) (86 
rows) SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; @@ -3265,73 +3231,73 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + -> Parallel Index Scan using compress_hyper_4_110_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_110_chunk Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value - Filter: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + -> Parallel Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + -> Parallel Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 - -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_4_113_chunk + -> Parallel Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + -> Parallel Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + -> Parallel Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + -> Parallel Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, 
compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + -> Parallel Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + -> Parallel Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) (78 rows) :EXPLAIN @@ -3349,73 +3315,73 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + -> Parallel Index Scan using compress_hyper_4_110_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_110_chunk Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value - Filter: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND 
(compress_hyper_4_110_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + -> Parallel Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + -> Parallel Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + -> Parallel Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + -> Parallel Index 
Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + -> Parallel Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + -> Parallel Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + -> Parallel Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", 
compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) Grouping Policy: all compressed batches -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + -> Parallel Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) + Index Cond: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) (78 rows) -- Vectorization not possible filter on segment_by and compressed value diff --git a/tsl/test/shared/expected/ordered_append-14.out b/tsl/test/shared/expected/ordered_append-14.out index b7fab79f296..de47a266539 100644 --- a/tsl/test/shared/expected/ordered_append-14.out +++ b/tsl/test/shared/expected/ordered_append-14.out @@ -2344,18 +2344,22 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) -(12 rows) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: time_bucket('@ 1 day'::interval, metrics_compressed."time") + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(16 rows) -- test query with ORDER BY time_bucket, device_id -- must not use ordered append @@ -2370,21 +2374,31 @@ ORDER BY time_bucket('1d', time), LIMIT 1; QUERY PLAN Limit (actual rows=1 
loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(15 rows) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(25 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -2394,18 +2408,22 @@ ORDER BY date_trunc('day', time) LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) -(12 rows) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_compressed."time") + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) 
on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(16 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -2415,18 +2433,22 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) -(12 rows) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_compressed."time") + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(16 rows) -- test query with ORDER BY date_trunc, device_id -- must not use ordered append @@ -2441,21 +2463,31 @@ ORDER BY 1, LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY 
('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(15 rows) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(25 rows) -- test query with now() should result in ordered ChunkAppend :PREFIX @@ -3922,30 +3954,54 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: time_bucket('@ 1 day'::interval, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 
day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(48 rows) -- test query with ORDER BY time_bucket, device_id -- must not use ordered append @@ -3960,30 +4016,49 @@ ORDER BY time_bucket('1d', time), LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: 
(time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(24 rows) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(43 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -3993,30 +4068,54 @@ ORDER BY date_trunc('day', time) LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 
loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(48 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -4026,30 +4125,54 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) 
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(48 rows) -- test query with ORDER BY date_trunc, device_id -- must not use ordered append @@ -4064,30 +4187,49 @@ ORDER BY 1, LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort 
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(24 rows) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(43 rows) -- test query with now() should result in ordered ChunkAppend :PREFIX diff --git a/tsl/test/shared/expected/ordered_append-15.out b/tsl/test/shared/expected/ordered_append-15.out index e617dc1936a..19f37ac77f4 100644 --- a/tsl/test/shared/expected/ordered_append-15.out +++ b/tsl/test/shared/expected/ordered_append-15.out @@ -2368,18 +2368,26 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk 
(actual rows=30 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) -(12 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: time_bucket('@ 1 day'::interval, metrics_compressed."time") + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(20 rows) -- test query with ORDER BY time_bucket, device_id -- must not use ordered append @@ -2394,21 +2402,34 @@ ORDER BY time_bucket('1d', time), LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(15 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=7196 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=10076 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Sort (actual rows=1 
loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=10076 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(28 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -2418,18 +2439,26 @@ ORDER BY date_trunc('day', time) LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) -(12 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_compressed."time") + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(20 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -2439,18 +2468,26 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1) -(12 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_compressed."time") + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=17990 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1) + -> Sort (never 
executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(20 rows) -- test query with ORDER BY date_trunc, device_id -- must not use ordered append @@ -2465,21 +2502,34 @@ ORDER BY 1, LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(15 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=7196 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=10076 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=10076 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(28 rows) -- test query with now() should result in ordered ChunkAppend :PREFIX @@ -3952,30 +4002,64 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> 
Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: time_bucket('@ 1 day'::interval, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=10794 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: 
(time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(58 rows) -- test query with ORDER BY time_bucket, device_id -- must not use ordered append @@ -3990,30 +4074,55 @@ ORDER BY time_bucket('1d', time), LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: 
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 8
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+(49 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -4023,30 +4132,64 @@ ORDER BY date_trunc('day', time)
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_space_compressed."time")
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10794 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(58 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -4056,30 +4199,64 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_space_compressed."time")
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10794 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
_hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(58 rows) -- test query with ORDER BY date_trunc, device_id -- must not use ordered append @@ -4094,30 +4271,55 @@ ORDER BY 1, LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk 
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 8
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+(49 rows)
 -- test query with now() should result in ordered ChunkAppend
 :PREFIX
diff --git a/tsl/test/shared/expected/ordered_append-16.out b/tsl/test/shared/expected/ordered_append-16.out
index e617dc1936a..19f37ac77f4 100644
--- a/tsl/test/shared/expected/ordered_append-16.out
+++ b/tsl/test/shared/expected/ordered_append-16.out
@@ -2368,18 +2368,26 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-(12 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1)
+ Order: time_bucket('@ 1 day'::interval, metrics_compressed."time")
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=17990 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(20 rows)
 -- test query with ORDER BY time_bucket, device_id
 -- must not use ordered append
@@ -2394,21 +2402,34 @@ ORDER BY time_bucket('1d', time),
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
- Sort Method: top-N heapsort
- -> Result (actual rows=27348 loops=1)
- -> Append (actual rows=27348 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
-(15 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=7196 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10076 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 18
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10076 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 18
+(28 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -2418,18 +2439,26 @@ ORDER BY date_trunc('day', time)
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-(12 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_compressed."time")
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=17990 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(20 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -2439,18 +2468,26 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-(12 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_compressed."time")
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=17990 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(20 rows)
 -- test query with ORDER BY date_trunc, device_id
 -- must not use ordered append
@@ -2465,21 +2502,34 @@ ORDER BY 1,
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
- Sort Method: top-N heapsort
- -> Result (actual rows=27348 loops=1)
- -> Append (actual rows=27348 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
-(15 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=7196 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10076 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 18
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10076 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 18
+(28 rows)
 -- test query with now() should result in ordered ChunkAppend
 :PREFIX
@@ -3952,30 +4002,64 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1)
+ Order: time_bucket('@ 1 day'::interval, metrics_space_compressed."time")
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10794 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Merge Append (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Merge Append (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(58 rows)
 -- test query with ORDER BY time_bucket, device_id
 -- must not use ordered append
@@ -3990,30 +4074,55 @@ ORDER BY time_bucket('1d', time),
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
- Sort Method: top-N heapsort
- -> Result (actual rows=27348 loops=1)
- -> Append (actual rows=27348 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 8
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+(49 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -4023,30 +4132,64 @@ ORDER BY date_trunc('day', time)
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_space_compressed."time")
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10794 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(58 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -4056,30 +4199,64 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_space_compressed."time")
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10794 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Merge Append (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(58 rows)
 -- test query with ORDER BY date_trunc, device_id
 -- must not use ordered append
@@ -4094,30 +4271,55 @@ ORDER BY 1,
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
- Sort Method: top-N heapsort
- -> Result (actual rows=27348 loops=1)
- -> Append (actual rows=27348 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
-(24 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=3598 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 8
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=5038 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+(49 rows)
 -- test query with now() should result in ordered ChunkAppend
 :PREFIX
diff --git a/tsl/test/shared/expected/ordered_append-17.out b/tsl/test/shared/expected/ordered_append-17.out
index 77f78ff6839..43dab0a2dff 100644
--- a/tsl/test/shared/expected/ordered_append-17.out
+++ b/tsl/test/shared/expected/ordered_append-17.out
@@ -2311,18 +2311,26 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-(12 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1)
+ Order: time_bucket('@ 1 day'::interval, metrics_compressed."time")
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=17990 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(20 rows)
 -- test query with ORDER BY time_bucket, device_id
 -- must not use ordered append
@@ -2337,21 +2345,34 @@ ORDER BY time_bucket('1d', time),
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
- Sort Method: top-N heapsort
- -> Result (actual rows=27348 loops=1)
- -> Append (actual rows=27348 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
-(15 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Merge Append (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=7196 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 12
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10076 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 18
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
+ Sort Method: top-N heapsort
+ -> Result (actual rows=10076 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
+ Filter: (device_id = ANY ('{1,2}'::integer[]))
+ Rows Removed by Filter: 18
+(28 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -2361,18 +2382,26 @@ ORDER BY date_trunc('day', time)
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-(12 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_compressed."time")
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=17990 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(20 rows)
 -- test query with ORDER BY date_trunc
 :PREFIX
@@ -2382,18 +2411,26 @@ ORDER BY 1
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
- Sort Method: top-N heapsort
- -> Result (actual rows=68370 loops=1)
- -> Append (actual rows=68370 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=25190 loops=1)
- -> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-(12 rows)
+ -> Result (actual rows=1 loops=1)
+ -> Custom Scan (ChunkAppend) on metrics_compressed (actual rows=1 loops=1)
+ Order: date_trunc('day'::text, metrics_compressed."time")
+ -> Sort (actual rows=1 loops=1)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ Sort Method: top-N heapsort
+ -> Result (actual rows=17990 loops=1)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
+ -> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+ -> Sort (never executed)
+ Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time"))
+ -> Result (never executed)
+ -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed)
+ -> Seq Scan on compress_hyper_X_X_chunk (never executed)
+(20 rows)
 -- test query with ORDER BY date_trunc, device_id
 -- must not use ordered append
@@ -2408,21 +2445,34 @@ ORDER BY 1,
 LIMIT 1;
 QUERY PLAN
 Limit (actual rows=1 loops=1)
- -> Sort (actual rows=1 loops=1)
- Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id
- Sort Method: top-N heapsort
- -> Result (actual rows=27348 loops=1)
- -> Append (actual rows=27348 loops=1)
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=8 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
- -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1)
- -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=12 loops=1)
- Index Cond: (device_id = ANY ('{1,2}'::integer[]))
(device_id = ANY ('{1,2}'::integer[])) -(15 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=7196 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=7196 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=8 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=10076 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=10076 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10076 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 18 +(28 rows) -- test query with now() should result in ordered ChunkAppend :PREFIX @@ -3865,30 +3915,64 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: time_bucket('@ 1 day'::interval, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 
loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=10794 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(58 rows) -- test query with ORDER BY time_bucket, device_id -- must not use ordered append @@ -3903,30 +3987,55 @@ ORDER BY time_bucket('1d', time), LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using 
compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: 
(time_bucket('@ 1 day'::interval, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(49 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -3936,30 +4045,64 @@ ORDER BY date_trunc('day', time) LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=10794 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never 
executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(58 rows) -- test query with ORDER BY date_trunc :PREFIX @@ -3969,30 +4112,64 @@ ORDER BY 1 LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) - Sort Method: top-N heapsort - -> Result (actual rows=68370 loops=1) - -> Append (actual rows=68370 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=15114 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics_space_compressed (actual rows=1 loops=1) + Order: date_trunc('day'::text, metrics_space_compressed."time") + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, 
_hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=10794 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10794 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Merge Append (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) + -> Sort (never executed) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")) + -> Result (never executed) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (never executed) + -> Seq Scan on compress_hyper_X_X_chunk (never executed) +(58 rows) -- test query with ORDER BY date_trunc, device_id -- must not use ordered append @@ -4007,30 +4184,55 @@ ORDER BY 1, LIMIT 1; QUERY PLAN Limit (actual rows=1 loops=1) - -> Sort (actual rows=1 loops=1) - Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id - Sort Method: top-N heapsort - -> Result (actual rows=27348 loops=1) - -> Append (actual rows=27348 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=4 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan 
(DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) - -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) - -> Index Scan using compress_hyper_X_X_chunk_device_id__ts_meta_min_1__ts_meta_idx on compress_hyper_X_X_chunk (actual rows=6 loops=1) - Index Cond: (device_id = ANY ('{1,2}'::integer[])) -(24 rows) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=3598 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 8 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + -> Sort (actual rows=1 loops=1) + Sort Key: (date_trunc('day'::text, _hyper_X_X_chunk."time")), _hyper_X_X_chunk.device_id + Sort Method: top-N heapsort + -> Result (actual rows=5038 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=5038 loops=1) + -> Seq Scan 
on compress_hyper_X_X_chunk (actual rows=6 loops=1) + Filter: (device_id = ANY ('{1,2}'::integer[])) + Rows Removed by Filter: 12 +(49 rows) -- test query with now() should result in ordered ChunkAppend :PREFIX diff --git a/tsl/test/sql/compression.sql b/tsl/test/sql/compression.sql index adb7b81f07b..16541a12b37 100644 --- a/tsl/test/sql/compression.sql +++ b/tsl/test/sql/compression.sql @@ -797,18 +797,14 @@ CALL reindex_compressed_hypertable('f_sensor_data'); VACUUM ANALYZE f_sensor_data; -- Encourage use of parallel plans +SET max_parallel_workers_per_gather = 4; +SET min_parallel_index_scan_size = 0; +SET min_parallel_table_scan_size = 0; SET parallel_setup_cost = 0; SET parallel_tuple_cost = 0; -SET min_parallel_table_scan_size TO '0'; - -\set explain 'EXPLAIN (VERBOSE, COSTS OFF)' - -SHOW min_parallel_table_scan_size; SHOW max_parallel_workers; -SHOW max_parallel_workers_per_gather; -SET max_parallel_workers_per_gather = 4; -SHOW max_parallel_workers_per_gather; +\set explain 'EXPLAIN (VERBOSE, COSTS OFF)' -- We disable enable_parallel_append here to ensure -- that we create the same query plan in all PG 14.X versions @@ -817,16 +813,10 @@ SET enable_parallel_append = false; :explain SELECT sum(cpu) FROM f_sensor_data; --- Encourage use of Index Scan - -SET enable_seqscan = false; -SET enable_indexscan = true; -SET min_parallel_index_scan_size = 0; -SET min_parallel_table_scan_size = 0; - CREATE INDEX ON f_sensor_data (time, sensor_id); + :explain -SELECT * FROM f_sensor_data WHERE sensor_id > 100; +SELECT * FROM f_sensor_data WHERE sensor_id > 1000; RESET enable_parallel_append; @@ -844,11 +834,13 @@ FROM ORDER BY time; +VACUUM ANALYZE f_sensor_data; + :explain SELECT sum(cpu) FROM f_sensor_data; :explain -SELECT * FROM f_sensor_data WHERE sensor_id > 100; +SELECT * FROM f_sensor_data WHERE sensor_id > 1000; -- Test non-partial paths below append are not executed multiple times diff --git a/tsl/test/sql/merge_append_partially_compressed.sql.in b/tsl/test/sql/merge_append_partially_compressed.sql.in index a75aa16e78c..11fcf0be4e8 100644 --- a/tsl/test/sql/merge_append_partially_compressed.sql.in +++ b/tsl/test/sql/merge_append_partially_compressed.sql.in @@ -47,6 +47,20 @@ SELECT * FROM ht_metrics_compressed WHERE device = 3 ORDER BY time, device DESC :PREFIX SELECT * FROM ht_metrics_compressed ORDER BY device, time DESC LIMIT 1; -- with pushdown :PREFIX SELECT * FROM ht_metrics_compressed WHERE device IN (1,2,3) ORDER BY device, time DESC LIMIT 1; -- with pushdown +-- Test direct ordered select from a single partially compressed chunk +select * from show_chunks('ht_metrics_compressed') chunk order by chunk limit 1 \gset + +:PREFIX +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + +SELECT * FROM :chunk ORDER BY device, time LIMIT 5; + +:PREFIX +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + +SELECT * FROM :chunk ORDER BY device DESC, time DESC LIMIT 5; + + CREATE TABLE test1 ( time timestamptz NOT NULL, x1 integer, diff --git a/tsl/test/sql/vector_agg_memory.sql b/tsl/test/sql/vector_agg_memory.sql index 84a32071319..97a720006a5 100644 --- a/tsl/test/sql/vector_agg_memory.sql +++ b/tsl/test/sql/vector_agg_memory.sql @@ -63,11 +63,8 @@ select * from log where ( truncate log; set max_parallel_workers_per_gather = 0; set timescaledb.debug_require_vector_agg = 'require'; --- Despite the tweaks above, we are unable to force the HashAggregation, because --- the unsorted DecompressChunk paths for aggregation are not created properly --- (see issue #6836). 
Limit the memory consumed by tuplesort. -set work_mem = '64kB'; +-- We should reliably see HashAggregate here because of the tweaks we made above. explain (costs off) select ts_debug_allocated_bytes() bytes, count(*) a, count(t) b, sum(t) c, avg(t) d, min(t) e, max(t) f from mvagg where t >= -1 and t < 1000000 group by s1; @@ -90,10 +87,8 @@ reset work_mem; select * from log where ( -- For aggregation by segmentby, memory usage should be constant regardless -- of the number of tuples. Still, we have to allow for small variations - -- that can be caused by other reasons. Currently the major increase is - -- caused by tuplesort, because we are unable to force hash aggregation due - -- to unrelated planning bugs. - select regr_slope(bytes, n) > 0.05 from log + -- that can be caused by other reasons. + select regr_slope(bytes, n) > 0.01 from log ); reset timescaledb.debug_require_vector_agg; diff --git a/tsl/test/sql/vectorized_aggregation.sql b/tsl/test/sql/vectorized_aggregation.sql index 324a96716f5..05f33c0422f 100644 --- a/tsl/test/sql/vectorized_aggregation.sql +++ b/tsl/test/sql/vectorized_aggregation.sql @@ -31,6 +31,7 @@ SELECT sum(segment_by_value), sum(int_value), sum(float_value) FROM testtable; -- Tests with some chunks compressed --- SELECT compress_chunk(ch) FROM show_chunks('testtable') ch LIMIT 3; +VACUUM ANALYZE testtable; -- Vectorized aggregation possible SELECT sum(segment_by_value) FROM testtable; @@ -89,6 +90,7 @@ SELECT sum(float_value) FROM testtable; -- Tests with all chunks compressed --- SELECT compress_chunk(ch, if_not_compressed => true) FROM show_chunks('testtable') ch; +VACUUM ANALYZE testtable; -- Vectorized aggregation possible SELECT sum(segment_by_value) FROM testtable;
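For context on the relaxed regr_slope threshold in vector_agg_memory.sql above: the test logs ts_debug_allocated_bytes() against the number of tuples processed and fits a least-squares line through the measurements. If memory usage per segmentby group is constant, the fitted slope stays near zero, so any real growth shows up as a positive slope. A minimal self-contained sketch of the same check, using a hypothetical temp table and made-up byte counts (not part of the patch):

-- Hypothetical measurements: allocated bytes at three input sizes.
CREATE TEMP TABLE mem_log (n bigint, bytes bigint);
INSERT INTO mem_log VALUES (1000, 50000), (10000, 50400), (100000, 50800);
-- regr_slope(Y, X) returns the slope of the least-squares fit of Y on X.
-- Constant memory keeps the slope near zero; the test flags growth above
-- 0.01 bytes per tuple.
SELECT regr_slope(bytes, n) > 0.01 AS memory_grows FROM mem_log;
-- Returns false here: the fitted slope is about 0.007 bytes per tuple,
-- below the 0.01 threshold, i.e. only the small variation the test allows.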