Skip to content

Commit

Permalink
Compression can't be enabled on caggs
Browse files Browse the repository at this point in the history
Continuous aggregate creation failed when the segmentby/orderby
columns needed quoting.
  • Loading branch information
kgyrtkirk committed Feb 7, 2023
1 parent 4cb76bc commit 5b8c4c4
Show file tree
Hide file tree
Showing 4 changed files with 57 additions and 32 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ accidentally triggering the load of a previous DB version.**
* #4926 Fix corruption when inserting into compressed chunks
* #5218 Add role-level security to job error log
* #5214 Fix use of prepared statement in async module
* #5290 Compression can't be enabled on continuous aggregates when segmentby/orderby columns need quoting

## 2.9.3 (2023-02-03)

Expand Down
40 changes: 8 additions & 32 deletions tsl/src/continuous_aggs/options.c
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ cagg_get_compression_params(ContinuousAgg *agg, Hypertable *mat_ht)
{
List *defelems = NIL;
const Dimension *mat_ht_dim = hyperspace_get_open_dimension(mat_ht->space, 0);
const char *mat_ht_timecolname = NameStr(mat_ht_dim->fd.column_name);
const char *mat_ht_timecolname = quote_identifier(NameStr(mat_ht_dim->fd.column_name));
DefElem *ordby = makeDefElemExtended("timescaledb",
"compress_orderby",
(Node *) makeString((char *) mat_ht_timecolname),
Expand All @@ -166,49 +166,25 @@ cagg_get_compression_params(ContinuousAgg *agg, Hypertable *mat_ht)
List *grp_colnames = cagg_find_groupingcols(agg, mat_ht);
if (grp_colnames)
{
StringInfo info = makeStringInfo();
ListCell *lc;
/* we have column names. they are guaranteed to be at most
* NAMEDATALEN
*/
int seglen = ((NAMEDATALEN + 1) * list_length(grp_colnames)) + 1;
char *segmentby = (char *) palloc(seglen);
int segidx = 0;
foreach (lc, grp_colnames)
{
int collen;
char *grpcol = (char *) lfirst(lc);
/* skip time dimension col if it appears in group-by list */
if (namestrcmp((Name) & (mat_ht_dim->fd.column_name), grpcol) == 0)
continue;
if (segidx > 0 && (seglen - segidx) > 1)
{
strlcpy(segmentby + segidx, ",", 2);
segidx = segidx + 1;
}
collen = strlen(grpcol);
if (seglen - segidx > collen)
{
strlcpy(segmentby + segidx, grpcol, collen + 1);
segidx = segidx + collen;
}
else
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("%s not enough space to copy segment by column (%d %d %d)",
__func__,
seglen,
segidx,
collen)));
}
if (info->len > 0)
appendStringInfoString(info, ",");
appendStringInfoString(info, quote_identifier(grpcol));
}
if (segidx != 0)

if (info->len > 0)
{
DefElem *segby;
segmentby[segidx] = '\0';
segby = makeDefElemExtended("timescaledb",
"compress_segmentby",
(Node *) makeString(segmentby),
(Node *) makeString(info->data),
DEFELEM_UNSPEC,
-1);
defelems = lappend(defelems, segby);
Expand Down
25 changes: 25 additions & 0 deletions tsl/test/expected/compression_ddl.out
Original file line number Diff line number Diff line change
Expand Up @@ -1273,3 +1273,28 @@ Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._compressed_hypertable_23 FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker()

DROP TABLE metric CASCADE;
-- Creating hypertable
CREATE TABLE "tEst2" (
"Id" uuid NOT NULL,
"Time" timestamp with time zone NOT NULL,
CONSTRAINT "test2_pkey" PRIMARY KEY ("Id", "Time")
);
SELECT create_hypertable(
'"tEst2"',
'Time',
chunk_time_interval => INTERVAL '1 day'
);
create_hypertable
---------------------
(24,public,tEst2,t)
(1 row)

alter table "tEst2" set (timescaledb.compress=true, timescaledb.compress_segmentby='"Id"');
CREATE MATERIALIZED VIEW "tEst2_mv"
WITH (timescaledb.continuous) AS
SELECT "Id" as "Idd",
time_bucket(INTERVAL '1 day', "Time") AS "bUcket"
FROM public."tEst2"
GROUP BY "Idd", "bUcket";
NOTICE: continuous aggregate "tEst2_mv" is already up-to-date
ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);
23 changes: 23 additions & 0 deletions tsl/test/sql/compression_ddl.sql
Original file line number Diff line number Diff line change
Expand Up @@ -549,3 +549,26 @@ WHERE uc_hypertable.table_name like 'metric' \gset

DROP TABLE metric CASCADE;

-- Creating hypertable
CREATE TABLE "tEst2" (
"Id" uuid NOT NULL,
"Time" timestamp with time zone NOT NULL,
CONSTRAINT "test2_pkey" PRIMARY KEY ("Id", "Time")
);

SELECT create_hypertable(
'"tEst2"',
'Time',
chunk_time_interval => INTERVAL '1 day'
);

alter table "tEst2" set (timescaledb.compress=true, timescaledb.compress_segmentby='"Id"');

CREATE MATERIALIZED VIEW "tEst2_mv"
WITH (timescaledb.continuous) AS
SELECT "Id" as "Idd",
time_bucket(INTERVAL '1 day', "Time") AS "bUcket"
FROM public."tEst2"
GROUP BY "Idd", "bUcket";

ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);

0 comments on commit 5b8c4c4

Please sign in to comment.