From a7a8fe6c7ab7b40d624d2336d05fe7abcfe6f3a7 Mon Sep 17 00:00:00 2001
From: Rohan Yadav
Date: Wed, 4 Sep 2019 18:17:29 -0400
Subject: [PATCH 1/4] sql: Display inherited constraints in SHOW PARTITIONS

SHOW PARTITIONS now displays the inherited zone configuration of each
partition in a separate column. To accomplish this, the
crdb_internal.zones table now holds the inherited configuration of each
zone in separate columns. Additionally, the crdb_internal.partitions
table holds the zone_id and subzone_id of the zone configuration each
partition refers to. These IDs correspond to the zone configuration at
the lowest point in that partition's "inheritance chain".

Release justification: Adds a low-risk, good-to-have UX feature.

Fixes #40349.

Release note (sql change):
* SHOW PARTITIONS now displays inherited zone configurations.
* Add the zone_id and subzone_id columns to crdb_internal.partitions,
  which link each partition to its corresponding zone config in
  crdb_internal.zones.
* Rename the config_yaml, config_sql, and config_protobuf columns in
  crdb_internal.zones to raw_config_yaml, raw_config_sql, and
  raw_config_protobuf.
* Add the full_config_sql and full_config_yaml columns to the
  crdb_internal.zones table, which display the full/inherited zone
  configuration.
---
 pkg/base/zone.go                              |  22 ++
 .../testdata/logic_test/crdb_internal         |  28 +-
 .../testdata/logic_test/distsql_partitioning  | 340 +++++++++++++++---
 .../testdata/logic_test/partitioning          | 152 ++++++--
 pkg/config/zone.go                            |   3 +-
 pkg/server/admin.go                           |   2 +-
 pkg/sql/crdb_internal.go                      | 147 +++++++-
 pkg/sql/delegate/show_partitions.go           |  45 ++-
 .../testdata/logic_test/crdb_internal         |  14 +-
 .../logictest/testdata/logic_test/show_source |   4 +-
 .../logictest/testdata/logic_test/zone_config |  14 +-
 .../opt/exec/execbuilder/testdata/subquery    |   4 +-
 pkg/sql/show_zone_config.go                   | 147 +++++---
 pkg/storage/reports/constraint_report.go      |   5 +-
 pkg/storage/reports/locality_report.go        |   3 +-
 pkg/storage/reports/range_report.go           |   5 +-
 pkg/storage/reports/reporter.go               |   3 +-
 pkg/storage/reports/reporter_test.go          |   6 +-
 pkg/storage/reports/zone_key.go               |  23 +-
 pkg/testutils/sqlutils/zone.go                |   4 +-
 20 files changed, 751 insertions(+), 220 deletions(-)
 create mode 100644 pkg/base/zone.go

diff --git a/pkg/base/zone.go b/pkg/base/zone.go
new file mode 100644
index 000000000000..c4fcf75d2827
--- /dev/null
+++ b/pkg/base/zone.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package base
+
+// SubzoneID represents a subzone within a zone. It's the subzone's index within
+// the parent zone + 1; there's no subzone 0 so that 0 can be used as a
+// sentinel.
+type SubzoneID uint32
+
+// SubzoneIDFromIndex turns a subzone's index within its parent zone into its
+// SubzoneID.
+func SubzoneIDFromIndex(idx int) SubzoneID { + return SubzoneID(idx + 1) +} diff --git a/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal b/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal index 4b2b0dafee77..235a08ce8429 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal +++ b/pkg/ccl/logictestccl/testdata/logic_test/crdb_internal @@ -1,9 +1,9 @@ # LogicTest: local -query IITTITTT colnames +query IITTITTTII colnames SELECT * FROM crdb_internal.partitions ---- -table_id index_id parent_name name columns column_names list_value range_value +table_id index_id parent_name name columns column_names list_value range_value zone_id subzone_id statement ok CREATE TABLE t1 ( @@ -33,16 +33,18 @@ CREATE table t2 (a STRING PRIMARY KEY) PARTITION BY LIST (a) ( PARTITION pfoo VALUES IN ('foo') ) -query IITTITTT +# Since there are no zone configurations on any of these partitions, tables, +# or databases, these partitions inherit directly from the default config. +query IITTITTTII SELECT * FROM crdb_internal.partitions ORDER BY table_id, index_id, name ---- -53 1 NULL p12 1 a (1), (2) NULL -53 1 p12 p12p3 1 b (3) NULL -53 1 p12p3 p12p3p8 1 c (8) NULL -53 1 NULL p6 1 a (6) NULL -53 1 p6 p6p7 1 b NULL (MINVALUE) TO (7) -53 1 p6 p6p8 1 b NULL (7) TO (8) -53 1 p6 p6px 1 b NULL (8) TO (MAXVALUE) -53 1 p12 pd 1 b (DEFAULT) NULL -53 2 NULL p00 2 a, b (0, 0) NULL -54 1 NULL pfoo 1 a ('foo') NULL +53 1 NULL p12 1 a (1), (2) NULL 0 0 +53 1 p12 p12p3 1 b (3) NULL 0 0 +53 1 p12p3 p12p3p8 1 c (8) NULL 0 0 +53 1 NULL p6 1 a (6) NULL 0 0 +53 1 p6 p6p7 1 b NULL (MINVALUE) TO (7) 0 0 +53 1 p6 p6p8 1 b NULL (7) TO (8) 0 0 +53 1 p6 p6px 1 b NULL (8) TO (MAXVALUE) 0 0 +53 1 p12 pd 1 b (DEFAULT) NULL 0 0 +53 2 NULL p00 2 a, b (0, 0) NULL 0 0 +54 1 NULL pfoo 1 a ('foo') NULL 0 0 diff --git a/pkg/ccl/logictestccl/testdata/logic_test/distsql_partitioning b/pkg/ccl/logictestccl/testdata/logic_test/distsql_partitioning index 2170a8820d6d..78fc829300f2 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/distsql_partitioning +++ b/pkg/ccl/logictestccl/testdata/logic_test/distsql_partitioning @@ -12,40 +12,100 @@ ALTER TABLE t1 PARTITION BY LIST (x) ( PARTITION p3 VALUES IN (3) ) -query TTTTTTTT colnames +query TTTTTTTTT colnames SHOW PARTITIONS FROM DATABASE test ---- -database_name table_name partition_name parent_partition column_names index_name partition_value zone_config -test t1 p1 NULL x t1@primary (1) NULL -test t1 p2 NULL x t1@primary (2) NULL -test t1 p3 NULL x t1@primary (3) NULL +database_name table_name partition_name parent_partition column_names index_name partition_value zone_config full_zone_config +test t1 p1 NULL x t1@primary (1) NULL range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[]', +lease_preferences = '[]' +test t1 p2 NULL x t1@primary (2) NULL range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[]', +lease_preferences = '[]' +test t1 p3 NULL x t1@primary (3) NULL range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[]', +lease_preferences = '[]' statement ok ALTER PARTITION p1 OF TABLE t1 CONFIGURE ZONE USING constraints='[+dc=dc1]'; ALTER PARTITION p2 OF TABLE t1 CONFIGURE ZONE USING constraints='[+dc=dc2]'; ALTER PARTITION p3 OF TABLE t1 CONFIGURE ZONE USING constraints='[+dc=dc3]' -query TTTTTTTT colnames +query TTTTTTTTT colnames SHOW PARTITIONS FROM DATABASE test ---- 
-database_name table_name partition_name parent_partition column_names index_name partition_value zone_config -test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' -test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' -test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' +database_name table_name partition_name parent_partition column_names index_name partition_value zone_config full_zone_config +test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM TABLE t1 ---- -test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' -test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' -test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' +test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM INDEX t1@primary ---- -test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' -test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' -test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' +test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' statement ok CREATE TABLE t2 (x INT PRIMARY KEY) @@ -60,26 +120,71 @@ statement ok ALTER PARTITION p1 OF TABLE t2 CONFIGURE ZONE USING constraints='[+dc=dc1]'; ALTER PARTITION p2 OF TABLE t2 CONFIGURE ZONE USING constraints='[+dc=dc2]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM DATABASE test ---- -test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' -test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' -test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' -test t2 p1 NULL x t2@primary (1) TO (2) constraints = '[+dc=dc1]' -test t2 p2 NULL x t2@primary (2) TO (3) constraints = '[+dc=dc2]' +test t1 p1 NULL x t1@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 
16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t1 p2 NULL x t1@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t1 p3 NULL x t1@primary (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' +test t2 p1 NULL x t2@primary (1) TO (2) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t2 p2 NULL x t2@primary (2) TO (3) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM TABLE t2 ---- -test t2 p1 NULL x t2@primary (1) TO (2) constraints = '[+dc=dc1]' -test t2 p2 NULL x t2@primary (2) TO (3) constraints = '[+dc=dc2]' +test t2 p1 NULL x t2@primary (1) TO (2) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t2 p2 NULL x t2@primary (2) TO (3) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM INDEX t2@primary ---- -test t2 p1 NULL x t2@primary (1) TO (2) constraints = '[+dc=dc1]' -test t2 p2 NULL x t2@primary (2) TO (3) constraints = '[+dc=dc2]' +test t2 p1 NULL x t2@primary (1) TO (2) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t2 p2 NULL x t2@primary (2) TO (3) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' statement ok CREATE TABLE t3 (x INT PRIMARY KEY, y INT, INDEX sec (y)) @@ -102,27 +207,77 @@ ALTER PARTITION p2 OF TABLE t3 CONFIGURE ZONE USING constraints='[+dc=dc2]'; ALTER PARTITION p3 OF INDEX t3@sec CONFIGURE ZONE USING constraints='[+dc=dc3]'; ALTER PARTITION p4 OF INDEX t3@sec CONFIGURE ZONE USING constraints='[+dc=dc4]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM TABLE t3 ---- -test t3 p1 NULL x t3@primary (1) constraints = '[+dc=dc1]' -test t3 p2 NULL x t3@primary (2) constraints = '[+dc=dc2]' -test t3 p3 NULL y t3@sec (3) constraints = '[+dc=dc3]' -test t3 p4 NULL y t3@sec (4) constraints = '[+dc=dc4]' +test t3 p1 NULL x t3@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t3 p2 NULL x t3@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t3 p3 NULL y t3@sec (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', 
+lease_preferences = '[]' +test t3 p4 NULL y t3@sec (4) constraints = '[+dc=dc4]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc4]', +lease_preferences = '[]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM INDEX t3@* ---- -test t3 p1 NULL x t3@primary (1) constraints = '[+dc=dc1]' -test t3 p2 NULL x t3@primary (2) constraints = '[+dc=dc2]' -test t3 p3 NULL y t3@sec (3) constraints = '[+dc=dc3]' -test t3 p4 NULL y t3@sec (4) constraints = '[+dc=dc4]' +test t3 p1 NULL x t3@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t3 p2 NULL x t3@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t3 p3 NULL y t3@sec (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' +test t3 p4 NULL y t3@sec (4) constraints = '[+dc=dc4]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc4]', +lease_preferences = '[]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM INDEX t3@sec ---- -test t3 p3 NULL y t3@sec (3) constraints = '[+dc=dc3]' -test t3 p4 NULL y t3@sec (4) constraints = '[+dc=dc4]' +test t3 p3 NULL y t3@sec (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' +test t3 p4 NULL y t3@sec (4) constraints = '[+dc=dc4]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc4]', +lease_preferences = '[]' statement ok CREATE TABLE t4 (x INT, y INT, PRIMARY KEY (x, y)) @@ -145,11 +300,98 @@ ALTER PARTITION p1_b OF TABLE t4 CONFIGURE ZONE USING constraints='[+dc=dc3]'; ALTER PARTITION p2 OF TABLE t4 CONFIGURE ZONE USING constraints='[+dc=dc4]'; ALTER PARTITION p2_a OF TABLE t4 CONFIGURE ZONE USING constraints='[+dc=dc5]' -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM TABLE t4 ---- -test t4 p1 NULL x t4@primary (1) constraints = '[+dc=dc1]' -test t4 p1_a p1 y t4@primary (2) constraints = '[+dc=dc2]' -test t4 p1_b p1 y t4@primary (3) constraints = '[+dc=dc3]' -test t4 p2 NULL x t4@primary (4) constraints = '[+dc=dc4]' -test t4 p2_a p2 y t4@primary (5) constraints = '[+dc=dc5]' +test t4 p1 NULL x t4@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc1]', +lease_preferences = '[]' +test t4 p1_a p1 y t4@primary (2) constraints = '[+dc=dc2]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc2]', +lease_preferences = '[]' +test t4 p1_b p1 y t4@primary (3) constraints = '[+dc=dc3]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc3]', +lease_preferences = '[]' +test t4 p2 NULL x t4@primary (4) constraints = '[+dc=dc4]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc4]', +lease_preferences = '[]' +test t4 p2_a p2 y 
t4@primary (5) constraints = '[+dc=dc5]' range_min_bytes = 16777216, +range_max_bytes = 67108864, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[+dc=dc5]', +lease_preferences = '[]' + +# Partitioning inheritance test. +statement ok +CREATE DATABASE partitioning + +statement ok +CREATE TABLE partitioning.inheritance (x INT PRIMARY KEY) + +statement ok +ALTER INDEX partitioning.inheritance@primary PARTITION BY LIST (x) ( PARTITION p1 VALUES IN (1)) + +statement ok +ALTER DATABASE partitioning CONFIGURE ZONE USING range_min_bytes=64000, range_max_bytes=75000 + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE partitioning.inheritance +---- +partitioning inheritance p1 NULL x inheritance@primary (1) NULL range_min_bytes = 64000, +range_max_bytes = 75000, +gc.ttlseconds = 90000, +num_replicas = 3, +constraints = '[]', +lease_preferences = '[]' + +statement ok +ALTER TABLE partitioning.inheritance CONFIGURE ZONE USING gc.ttlseconds=80000 + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE partitioning.inheritance +---- +partitioning inheritance p1 NULL x inheritance@primary (1) NULL range_min_bytes = 64000, +range_max_bytes = 75000, +gc.ttlseconds = 80000, +num_replicas = 3, +constraints = '[]', +lease_preferences = '[]' + +statement ok +ALTER INDEX partitioning.inheritance@primary CONFIGURE ZONE USING num_replicas=5 + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE partitioning.inheritance +---- +partitioning inheritance p1 NULL x inheritance@primary (1) NULL range_min_bytes = 64000, +range_max_bytes = 75000, +gc.ttlseconds = 80000, +num_replicas = 5, +constraints = '[]', +lease_preferences = '[]' + +statement ok +ALTER PARTITION p1 OF INDEX partitioning.inheritance@primary CONFIGURE ZONE USING constraints='[+dc=dc1]' + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE partitioning.inheritance +---- +partitioning inheritance p1 NULL x inheritance@primary (1) constraints = '[+dc=dc1]' range_min_bytes = 64000, +range_max_bytes = 75000, +gc.ttlseconds = 80000, +num_replicas = 5, +constraints = '[+dc=dc1]', +lease_preferences = '[]' diff --git a/pkg/ccl/logictestccl/testdata/logic_test/partitioning b/pkg/ccl/logictestccl/testdata/logic_test/partitioning index 4ee93174c91d..b818f2b7b420 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/partitioning +++ b/pkg/ccl/logictestccl/testdata/logic_test/partitioning @@ -872,20 +872,35 @@ CREATE DATABASE d_show_partitions statement ok CREATE TABLE d_show_partitions.t (x INT PRIMARY KEY) PARTITION BY LIST (x) ( PARTITION p1 VALUES IN (1)) -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM DATABASE d_show_partitions ---- -d_show_partitions t p1 NULL x t@primary (1) NULL - -query TTTTTTTT +d_show_partitions t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +query TTTTTTTTT SHOW PARTITIONS FROM TABLE d_show_partitions.t ---- -d_show_partitions t p1 NULL x t@primary (1) NULL - -query TTTTTTTT +d_show_partitions t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +query TTTTTTTTT SHOW PARTITIONS FROM INDEX d_show_partitions.t@primary ---- -d_show_partitions t p1 NULL x t@primary (1) NULL +d_show_partitions t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' statement ok 
CREATE DATABASE "show partitions" @@ -893,20 +908,35 @@ CREATE DATABASE "show partitions" statement ok CREATE TABLE "show partitions".t (x INT PRIMARY KEY) PARTITION BY LIST (x) ( PARTITION p1 VALUES IN (1)) -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM DATABASE "show partitions" ---- -show partitions t p1 NULL x t@primary (1) NULL - -query TTTTTTTT +show partitions t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +query TTTTTTTTT SHOW PARTITIONS FROM TABLE "show partitions".t ---- -show partitions t p1 NULL x t@primary (1) NULL - -query TTTTTTTT +show partitions t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +query TTTTTTTTT SHOW PARTITIONS FROM INDEX "show partitions".t@primary ---- -show partitions t p1 NULL x t@primary (1) NULL +show partitions t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' statement ok CREATE DATABASE """" @@ -914,22 +944,96 @@ CREATE DATABASE """" statement ok CREATE TABLE """".t (x INT PRIMARY KEY) PARTITION BY LIST (x) ( PARTITION p1 VALUES IN (1)) -query TTTTTTTT +query TTTTTTTTT SHOW PARTITIONS FROM DATABASE """" ---- -" t p1 NULL x t@primary (1) NULL - -query TTTTTTTT +" t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +query TTTTTTTTT SHOW PARTITIONS FROM TABLE """".t ---- -" t p1 NULL x t@primary (1) NULL - -query TTTTTTTT +" t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +query TTTTTTTTT SHOW PARTITIONS FROM INDEX """".t@primary ---- -" t p1 NULL x t@primary (1) NULL +" t p1 NULL x t@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' query T SELECT feature_name FROM crdb_internal.feature_usage WHERE feature_name='sql.show.partitions' AND usage_count > 0 ---- sql.show.partitions + +# Testing show partitions with inherited constraints. 
+statement ok +CREATE TABLE t_inherit (x INT PRIMARY KEY) + +statement ok +ALTER TABLE t_inherit PARTITION BY LIST (x) ( PARTITION p1 VALUES IN (1) ) + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE t_inherit +---- +test t_inherit p1 NULL x t_inherit@primary (1) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +statement ok +ALTER PARTITION p1 of TABLE t_inherit CONFIGURE ZONE USING num_replicas=5 + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE t_inherit +---- +test t_inherit p1 NULL x t_inherit@primary (1) num_replicas = 5 range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 5, + constraints = '[]', + lease_preferences = '[]' + +statement ok +CREATE TABLE t_inherit_range (x INT PRIMARY KEY) + +statement ok +ALTER TABLE t_inherit_range PARTITION BY RANGE (x) ( PARTITION p1 VALUES FROM (1) to (2) ) + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE t_inherit_range +---- +test t_inherit_range p1 NULL x t_inherit_range@primary (1) TO (2) NULL range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 3, + constraints = '[]', + lease_preferences = '[]' + +statement ok +ALTER PARTITION p1 of TABLE t_inherit_range CONFIGURE ZONE USING num_replicas=5 + +query TTTTTTTTT +SHOW PARTITIONS FROM TABLE t_inherit_range +---- +test t_inherit_range p1 NULL x t_inherit_range@primary (1) TO (2) num_replicas = 5 range_min_bytes = 16777216, + range_max_bytes = 67108864, + gc.ttlseconds = 90000, + num_replicas = 5, + constraints = '[]', + lease_preferences = '[]' diff --git a/pkg/config/zone.go b/pkg/config/zone.go index 25e5f2e9cb83..5ca7b432705c 100644 --- a/pkg/config/zone.go +++ b/pkg/config/zone.go @@ -364,7 +364,8 @@ func (z *ZoneConfig) Validate() error { // InheritFromParent hydrates a zones missing fields from its parent. func (z *ZoneConfig) InheritFromParent(parent *ZoneConfig) { - if z.NumReplicas == nil { + // Allow for subzonePlaceholders to inherit fields from parents if needed. + if z.NumReplicas == nil || (z.NumReplicas != nil && *z.NumReplicas == 0) { if parent.NumReplicas != nil { z.NumReplicas = proto.Int32(*parent.NumReplicas) } diff --git a/pkg/server/admin.go b/pkg/server/admin.go index 3e495df4a7d5..39110e40172b 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -1626,7 +1626,7 @@ func (s *adminServer) DataDistribution( // Get zone configs. // TODO(vilterp): this can be done in parallel with getting table/db names and replica counts. zoneConfigsQuery := ` - SELECT target, config_sql, config_protobuf + SELECT target, raw_config_sql, raw_config_protobuf FROM crdb_internal.zones WHERE target IS NOT NULL ` diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index fbedf0cbc5fa..1ad9f5ad8105 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" @@ -1186,14 +1187,14 @@ CREATE TABLE crdb_internal.create_statements ( // Prepare a query used to see zones configuations on this table. 
configStmtsQuery := ` SELECT - table_name, config_yaml, config_sql + table_name, raw_config_yaml, raw_config_sql FROM crdb_internal.zones WHERE database_name = '%[1]s' AND table_name IS NOT NULL - AND config_yaml IS NOT NULL - AND config_sql IS NOT NULL + AND raw_config_yaml IS NOT NULL + AND raw_config_sql IS NOT NULL ORDER BY database_name, table_name, index_name, partition_name ` @@ -2099,10 +2100,12 @@ CREATE TABLE crdb_internal.zones ( table_name STRING, index_name STRING, partition_name STRING, - config_yaml STRING NOT NULL, - config_sql STRING, -- this column can be NULL if there is no specifier syntax + raw_config_yaml STRING NOT NULL, + raw_config_sql STRING, -- this column can be NULL if there is no specifier syntax -- possible (e.g. the object was deleted). - config_protobuf BYTES NOT NULL + raw_config_protobuf BYTES NOT NULL, + full_config_yaml STRING NOT NULL, + full_config_sql STRING ) `, populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { @@ -2118,6 +2121,14 @@ CREATE TABLE crdb_internal.zones ( "object with ID %d does not exist", errors.Safe(id)) } + getKey := func(key roachpb.Key) (*roachpb.Value, error) { + kv, err := p.txn.Get(ctx, key) + if err != nil { + return nil, err + } + return kv.Value, nil + } + rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.Query( ctx, "crdb-internal-zones-table", p.txn, `SELECT id, config FROM system.zones`) if err != nil { @@ -2146,14 +2157,32 @@ CREATE TABLE crdb_internal.zones ( } subzones := configProto.Subzones + // Inherit full information about this zone. + fullZone := configProto + if err := completeZoneConfig(&fullZone, uint32(tree.MustBeDInt(r[0])), getKey); err != nil { + return err + } + + // Write down information about the zone in the table. + // TODO (rohany): We would like to just display information about these + // subzone placeholders, but there are a few tests that depend on this + // behavior, so leave it in for now. if !configProto.IsSubzonePlaceholder() { // Ensure subzones don't infect the value of the config_proto column. configProto.Subzones = nil configProto.SubzoneSpans = nil - if err := generateZoneConfigIntrospectionValues(values, r[0], tree.NewDInt(tree.DInt(0)), zoneSpecifier, &configProto); err != nil { + if err := generateZoneConfigIntrospectionValues( + values, + r[0], + tree.NewDInt(tree.DInt(0)), + zoneSpecifier, + &configProto, + &fullZone, + ); err != nil { return err } + if err := addRow(values...); err != nil { return err } @@ -2179,9 +2208,35 @@ CREATE TABLE crdb_internal.zones ( zoneSpecifier = &zs } - if err := generateZoneConfigIntrospectionValues(values, r[0], tree.NewDInt(tree.DInt(i+1)), zoneSpecifier, &s.Config); err != nil { + // Generate information about full / inherited constraints. + // There are two cases -- the subzone we are looking at refers + // to an index, or to a partition. + subZoneConfig := s.Config + + // In this case, we have an index. Inherit from the parent zone. + if s.PartitionName == "" { + subZoneConfig.InheritFromParent(&fullZone) + } else { + // We have a partition. Get the parent index partition from the zone and + // have it inherit constraints. + if indexSubzone := fullZone.GetSubzone(uint32(index.ID), ""); indexSubzone != nil { + subZoneConfig.InheritFromParent(&indexSubzone.Config) + } + // Inherit remaining fields from the full parent zone. 
+ subZoneConfig.InheritFromParent(&fullZone) + } + + if err := generateZoneConfigIntrospectionValues( + values, + r[0], + tree.NewDInt(tree.DInt(i+1)), + zoneSpecifier, + &s.Config, + &subZoneConfig, + ); err != nil { return err } + if err := addRow(values...); err != nil { return err } @@ -2511,7 +2566,14 @@ CREATE TABLE crdb_internal.gossip_network ( }, } +// addPartitioningRows adds the rows in crdb_internal.partitions for each partition. +// None of the arguments can be nil, and it is used recursively when a list partition +// has subpartitions. In that case, the colOffset argument is incremented to represent +// how many columns of the index have been partitioned already. func addPartitioningRows( + ctx context.Context, + p *planner, + database string, table *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor, partitioning *sqlbase.PartitioningDescriptor, @@ -2532,7 +2594,7 @@ func addPartitioningRows( } colNames := tree.NewDString(buf.String()) - var a sqlbase.DatumAlloc + var datumAlloc sqlbase.DatumAlloc // We don't need real prefixes in the DecodePartitionTuple calls because we // only use the tree.Datums part of the output. @@ -2541,6 +2603,7 @@ func addPartitioningRows( fakePrefixDatums[i] = tree.DNull } + // This produces the list_value column. for _, l := range partitioning.List { var buf bytes.Buffer for j, values := range l.Values { @@ -2548,14 +2611,32 @@ func addPartitioningRows( buf.WriteString(`, `) } tuple, _, err := sqlbase.DecodePartitionTuple( - &a, table, index, partitioning, values, fakePrefixDatums, + &datumAlloc, table, index, partitioning, values, fakePrefixDatums, ) if err != nil { return err } buf.WriteString(tuple.String()) } + + partitionValue := tree.NewDString(buf.String()) name := tree.NewDString(l.Name) + + // Figure out which zone and subzone this partition should correspond to. + zoneID, zone, subzone, err := GetZoneConfigInTxn( + ctx, p.txn, uint32(table.ID), index, l.Name, false /* getInheritedDefault */) + if err != nil { + return err + } + subzoneID := base.SubzoneID(0) + if subzone != nil { + for i, s := range zone.Subzones { + if s.IndexID == subzone.IndexID && s.PartitionName == subzone.PartitionName { + subzoneID = base.SubzoneIDFromIndex(i) + } + } + } + if err := addRow( tableID, indexID, @@ -2563,22 +2644,25 @@ func addPartitioningRows( name, numColumns, colNames, - tree.NewDString(buf.String()), - tree.DNull, + partitionValue, + tree.DNull, /* null value for partition range */ + tree.NewDInt(tree.DInt(zoneID)), + tree.NewDInt(tree.DInt(subzoneID)), ); err != nil { return err } - err := addPartitioningRows(table, index, &l.Subpartitioning, name, + err = addPartitioningRows(ctx, p, database, table, index, &l.Subpartitioning, name, colOffset+int(partitioning.NumColumns), addRow) if err != nil { return err } } + // This produces the range_value column. 
for _, r := range partitioning.Range { var buf bytes.Buffer fromTuple, _, err := sqlbase.DecodePartitionTuple( - &a, table, index, partitioning, r.FromInclusive, fakePrefixDatums, + &datumAlloc, table, index, partitioning, r.FromInclusive, fakePrefixDatums, ) if err != nil { return err @@ -2586,12 +2670,29 @@ func addPartitioningRows( buf.WriteString(fromTuple.String()) buf.WriteString(" TO ") toTuple, _, err := sqlbase.DecodePartitionTuple( - &a, table, index, partitioning, r.ToExclusive, fakePrefixDatums, + &datumAlloc, table, index, partitioning, r.ToExclusive, fakePrefixDatums, ) if err != nil { return err } buf.WriteString(toTuple.String()) + partitionRange := tree.NewDString(buf.String()) + + // Figure out which zone and subzone this partition should correspond to. + zoneID, zone, subzone, err := GetZoneConfigInTxn( + ctx, p.txn, uint32(table.ID), index, r.Name, false /* getInheritedDefault */) + if err != nil { + return err + } + subzoneID := base.SubzoneID(0) + if subzone != nil { + for i, s := range zone.Subzones { + if s.IndexID == subzone.IndexID && s.PartitionName == subzone.PartitionName { + subzoneID = base.SubzoneIDFromIndex(i) + } + } + } + if err := addRow( tableID, indexID, @@ -2599,8 +2700,10 @@ func addPartitioningRows( tree.NewDString(r.Name), numColumns, colNames, - tree.DNull, - tree.NewDString(buf.String()), + tree.DNull, /* null value for partition list */ + partitionRange, + tree.NewDInt(tree.DInt(zoneID)), + tree.NewDInt(tree.DInt(subzoneID)), ); err != nil { return err } @@ -2624,14 +2727,20 @@ CREATE TABLE crdb_internal.partitions ( columns INT NOT NULL, column_names STRING, list_value STRING, - range_value STRING + range_value STRING, + zone_id INT, -- references a zone id in the crdb_internal.zones table + subzone_id INT -- references a subzone id in the crdb_internal.zones table ) `, populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { + dbName := "" + if dbContext != nil { + dbName = dbContext.Name + } return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no partitions*/ func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { return table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error { - return addPartitioningRows(table, index, &index.Partitioning, + return addPartitioningRows(ctx, p, dbName, table, index, &index.Partitioning, tree.DNull /* parentName */, 0 /* colOffset */, addRow) }) }) diff --git a/pkg/sql/delegate/show_partitions.go b/pkg/sql/delegate/show_partitions.go index 6a852bbd7002..8f69da9754f7 100644 --- a/pkg/sql/delegate/show_partitions.go +++ b/pkg/sql/delegate/show_partitions.go @@ -36,6 +36,9 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme return nil, err } + // We use the raw_config_sql from the partition_lookup result to get the + // official zone config for the partition, and use the full_config_sql from the zones table + // which is the result of looking up the partition's inherited zone configuraion. 
const showTablePartitionsQuery = ` SELECT tables.database_name, @@ -45,7 +48,8 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme partitions.column_names, concat(tables.name, '@', table_indexes.index_name) AS index_name, coalesce(partitions.list_value, partitions.range_value) as partition_value, - replace(regexp_extract(config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as zone_config + replace(regexp_extract(partition_lookup.raw_config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as zone_config, + replace(regexp_extract(zones.full_config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as full_zone_config FROM %[3]s.crdb_internal.partitions JOIN %[3]s.crdb_internal.tables ON partitions.table_id = tables.table_id @@ -53,10 +57,13 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme table_indexes.descriptor_id = tables.table_id AND table_indexes.index_id = partitions.index_id LEFT JOIN %[3]s.crdb_internal.zones ON - zones.database_name = tables.database_name - AND zones.table_name = tables.name - AND zones.index_name = table_indexes.index_name - AND zones.partition_name = partitions.name + partitions.zone_id = zones.zone_id + AND partitions.subzone_id = zones.subzone_id + LEFT JOIN %[3]s.crdb_internal.zones AS partition_lookup ON + partition_lookup.database_name = tables.database_name + AND partition_lookup.table_name = tables.name + AND partition_lookup.index_name = table_indexes.index_name + AND partition_lookup.partition_name = partitions.name WHERE tables.name = %[1]s AND tables.database_name = %[2]s; ` @@ -74,7 +81,8 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme partitions.column_names, concat(tables.name, '@', table_indexes.index_name) AS index_name, coalesce(partitions.list_value, partitions.range_value) as partition_value, - replace(regexp_extract(config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as zone_config + replace(regexp_extract(partition_lookup.raw_config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as zone_config, + replace(regexp_extract(zones.full_config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as full_zone_config FROM %[1]s.crdb_internal.partitions JOIN %[1]s.crdb_internal.tables ON partitions.table_id = tables.table_id @@ -82,10 +90,13 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme table_indexes.descriptor_id = tables.table_id AND table_indexes.index_id = partitions.index_id LEFT JOIN %[1]s.crdb_internal.zones ON - zones.database_name = tables.database_name - AND zones.table_name = tables.name - AND zones.index_name = table_indexes.index_name - AND zones.partition_name = partitions.name + partitions.zone_id = zones.zone_id + AND partitions.subzone_id = zones.subzone_id + LEFT JOIN %[1]s.crdb_internal.zones AS partition_lookup ON + partition_lookup.database_name = tables.database_name + AND partition_lookup.table_name = tables.name + AND partition_lookup.index_name = table_indexes.index_name + AND partition_lookup.partition_name = partitions.name WHERE tables.database_name = %[2]s ORDER BY @@ -130,7 +141,8 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme partitions.column_names, concat(tables.name, '@', table_indexes.index_name) AS index_name, coalesce(partitions.list_value, partitions.range_value) as partition_value, - replace(regexp_extract(config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as zone_config + 
replace(regexp_extract(partition_lookup.raw_config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as zone_config, + replace(regexp_extract(zones.full_config_sql, 'CONFIGURE ZONE USING\n((?s:.)*)'), e'\t', '') as full_zone_config FROM %[5]s.crdb_internal.partitions JOIN %[5]s.crdb_internal.table_indexes ON @@ -138,10 +150,13 @@ func (d *delegator) delegateShowPartitions(n *tree.ShowPartitions) (tree.Stateme AND partitions.table_id = table_indexes.descriptor_id JOIN %[5]s.crdb_internal.tables ON table_indexes.descriptor_id = tables.table_id LEFT JOIN %[5]s.crdb_internal.zones ON - zones.database_name = tables.database_name - AND zones.table_name = tables.name - AND zones.index_name = table_indexes.index_name - AND zones.partition_name = partitions.name + partitions.zone_id = zones.zone_id + AND partitions.subzone_id = zones.subzone_id + LEFT JOIN %[5]s.crdb_internal.zones AS partition_lookup ON + partition_lookup.database_name = tables.database_name + AND partition_lookup.table_name = tables.name + AND partition_lookup.index_name = table_indexes.index_name + AND partition_lookup.partition_name = partitions.name WHERE table_indexes.index_name = %[1]s AND tables.name = %[2]s; ` diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal b/pkg/sql/logictest/testdata/logic_test/crdb_internal index 74a429fa9d25..6d570308251a 100644 --- a/pkg/sql/logictest/testdata/logic_test/crdb_internal +++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal @@ -209,11 +209,11 @@ SELECT * FROM crdb_internal.forward_dependencies WHERE descriptor_name = '' ---- descriptor_id descriptor_name index_id dependedonby_id dependedonby_type dependedonby_index_id dependedonby_name dependedonby_details -query IITTTTTTTTT colnames +query IITTTTTTTTTTT colnames SELECT * FROM crdb_internal.zones WHERE false ---- zone_id subzone_id target range_name database_name table_name index_name partition_name -config_yaml config_sql config_protobuf +raw_config_yaml raw_config_sql raw_config_protobuf full_config_yaml full_config_sql query ITTTTTTTTTTTTI colnames SELECT * FROM crdb_internal.ranges WHERE range_id < 0 @@ -227,9 +227,9 @@ range_id start_key start_pretty end_key end_pretty database_name table_nam statement ok INSERT INTO system.zones (id, config) VALUES - (18, (SELECT config_protobuf FROM crdb_internal.zones WHERE zone_id = 0)), - (53, (SELECT config_protobuf FROM crdb_internal.zones WHERE zone_id = 0)), - (54, (SELECT config_protobuf FROM crdb_internal.zones WHERE zone_id = 0)) + (18, (SELECT raw_config_protobuf FROM crdb_internal.zones WHERE zone_id = 0)), + (53, (SELECT raw_config_protobuf FROM crdb_internal.zones WHERE zone_id = 0)), + (54, (SELECT raw_config_protobuf FROM crdb_internal.zones WHERE zone_id = 0)) query IT SELECT zone_id, target FROM crdb_internal.zones ORDER BY 1 @@ -247,12 +247,12 @@ SELECT zone_id, target FROM crdb_internal.zones ORDER BY 1 54 TABLE testdb.public.foo query T -SELECT quote_literal(config_yaml) FROM crdb_internal.zones WHERE zone_id = 0 +SELECT quote_literal(raw_config_yaml) FROM crdb_internal.zones WHERE zone_id = 0 ---- e'range_min_bytes: 16777216\nrange_max_bytes: 67108864\ngc:\n ttlseconds: 90000\nnum_replicas: 3\nconstraints: []\nlease_preferences: []\n' query T -SELECT config_sql FROM crdb_internal.zones WHERE zone_id = 0 +SELECT raw_config_sql FROM crdb_internal.zones WHERE zone_id = 0 ---- ALTER RANGE default CONFIGURE ZONE USING range_min_bytes = 16777216, diff --git a/pkg/sql/logictest/testdata/logic_test/show_source b/pkg/sql/logictest/testdata/logic_test/show_source 
index f59119e3eda2..4cfb068b19b5 100644 --- a/pkg/sql/logictest/testdata/logic_test/show_source +++ b/pkg/sql/logictest/testdata/logic_test/show_source @@ -98,12 +98,12 @@ test query TT colnames SELECT * FROM [SHOW ZONE CONFIGURATIONS] LIMIT 0 ---- -target config_sql +target raw_config_sql query TT colnames SELECT * FROM [SHOW ZONE CONFIGURATION FOR TABLE system.users] LIMIT 0 ---- -target config_sql +target raw_config_sql query T colnames,rowsort SELECT * FROM [SHOW DATABASES] diff --git a/pkg/sql/logictest/testdata/logic_test/zone_config b/pkg/sql/logictest/testdata/logic_test/zone_config index 2edea785b6cf..ad7e515eda6f 100644 --- a/pkg/sql/logictest/testdata/logic_test/zone_config +++ b/pkg/sql/logictest/testdata/logic_test/zone_config @@ -4,7 +4,7 @@ statement ok ALTER RANGE default CONFIGURE ZONE USING num_replicas = 1 query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR RANGE default] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR RANGE default] ---- 0 ALTER RANGE default CONFIGURE ZONE USING range_min_bytes = 16777216, @@ -20,7 +20,7 @@ statement ok ALTER RANGE default CONFIGURE ZONE USING DEFAULT query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR RANGE default] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR RANGE default] ---- 0 ALTER RANGE default CONFIGURE ZONE USING range_min_bytes = 16777216, @@ -41,7 +41,7 @@ CREATE TABLE a (id INT PRIMARY KEY) # Ensure that SHOW ZONE CONFIGURATION retrieves the default zone (ID 0) if # no zone was set. query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] ---- 0 ALTER RANGE default CONFIGURE ZONE USING range_min_bytes = 1234567, @@ -58,7 +58,7 @@ statement ok ALTER TABLE a CONFIGURE ZONE USING DEFAULT query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] ---- 53 ALTER TABLE a CONFIGURE ZONE USING range_min_bytes = 1234567, @@ -79,7 +79,7 @@ ALTER TABLE a CONFIGURE ZONE USING lease_preferences = '[[+region=test]]' query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] ---- 53 ALTER TABLE a CONFIGURE ZONE USING range_min_bytes = 200001, @@ -94,7 +94,7 @@ statement ok ALTER TABLE a CONFIGURE ZONE USING range_max_bytes = 400000 query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] ---- 53 ALTER TABLE a CONFIGURE ZONE USING range_min_bytes = 200001, @@ -112,7 +112,7 @@ ALTER TABLE a CONFIGURE ZONE USING DEFAULT # Note: the range_min_bytes here should reflect the non-standard # default that was set initially. 
query IT -SELECT zone_id, config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] +SELECT zone_id, raw_config_sql FROM [SHOW ZONE CONFIGURATION FOR TABLE a] ---- 53 ALTER TABLE a CONFIGURE ZONE USING range_min_bytes = 1234567, diff --git a/pkg/sql/opt/exec/execbuilder/testdata/subquery b/pkg/sql/opt/exec/execbuilder/testdata/subquery index 28c3e9c05454..c70fb013fd90 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/subquery +++ b/pkg/sql/opt/exec/execbuilder/testdata/subquery @@ -278,7 +278,7 @@ WHERE CAST( CASE WHEN ( EXISTS( - SELECT ref_1.config_yaml AS c0 + SELECT ref_1.raw_config_yaml AS c0 FROM crdb_internal.zones AS ref_1 WHERE subq_0.c1 IS NOT NULL ) @@ -307,7 +307,7 @@ WHERE WHEN ( '12'::STRING = ANY ( - SELECT ref_1.config_yaml AS c0 + SELECT ref_1.raw_config_yaml AS c0 FROM crdb_internal.zones AS ref_1 WHERE subq_0.c1 IS NOT NULL ) diff --git a/pkg/sql/show_zone_config.go b/pkg/sql/show_zone_config.go index c119b6571fa4..a3ef7eaa3f90 100644 --- a/pkg/sql/show_zone_config.go +++ b/pkg/sql/show_zone_config.go @@ -35,9 +35,11 @@ var showZoneConfigColumns = sqlbase.ResultColumns{ {Name: "table_name", Typ: types.String, Hidden: true}, {Name: "index_name", Typ: types.String, Hidden: true}, {Name: "partition_name", Typ: types.String, Hidden: true}, - {Name: "config_yaml", Typ: types.String, Hidden: true}, - {Name: "config_sql", Typ: types.String}, - {Name: "config_protobuf", Typ: types.Bytes, Hidden: true}, + {Name: "raw_config_yaml", Typ: types.String, Hidden: true}, + {Name: "raw_config_sql", Typ: types.String}, + {Name: "raw_config_protobuf", Typ: types.Bytes, Hidden: true}, + {Name: "full_config_yaml", Typ: types.String, Hidden: true}, + {Name: "full_config_sql", Typ: types.String, Hidden: true}, } // These must match showZoneConfigColumns. @@ -50,9 +52,11 @@ const ( tableNameCol indexNameCol partitionNameCol - configYAMLCol - configSQLCol - configProtobufCol + rawConfigYAMLCol + rawConfigSQLCol + rawConfigProtobufCol + fullConfigYamlCol + fullConfigSQLCol ) func (p *planner) ShowZoneConfig(ctx context.Context, n *tree.ShowZoneConfig) (planNode, error) { @@ -144,25 +148,81 @@ func getShowZoneConfigRow( vals := make(tree.Datums, len(showZoneConfigColumns)) if err := generateZoneConfigIntrospectionValues( - vals, tree.NewDInt(tree.DInt(zoneID)), tree.NewDInt(tree.DInt(subZoneIdx)), &zs, zone, + vals, tree.NewDInt(tree.DInt(zoneID)), tree.NewDInt(tree.DInt(subZoneIdx)), &zs, zone, nil, ); err != nil { return nil, err } return vals, nil } +// zoneConfigToSQL pretty prints a zone configuration as a SQL string. 
+func zoneConfigToSQL(zs *tree.ZoneSpecifier, zone *config.ZoneConfig) (string, error) { + constraints, err := yamlMarshalFlow(config.ConstraintsList{ + Constraints: zone.Constraints, + Inherited: zone.InheritedConstraints}) + if err != nil { + return "", err + } + constraints = strings.TrimSpace(constraints) + prefs, err := yamlMarshalFlow(zone.LeasePreferences) + if err != nil { + return "", err + } + prefs = strings.TrimSpace(prefs) + + useComma := false + f := tree.NewFmtCtx(tree.FmtParsable) + f.WriteString("ALTER ") + f.FormatNode(zs) + f.WriteString(" CONFIGURE ZONE USING\n") + if zone.RangeMinBytes != nil { + f.Printf("\trange_min_bytes = %d", *zone.RangeMinBytes) + useComma = true + } + if zone.RangeMaxBytes != nil { + writeComma(f, useComma) + f.Printf("\trange_max_bytes = %d", *zone.RangeMaxBytes) + useComma = true + } + if zone.GC != nil { + writeComma(f, useComma) + f.Printf("\tgc.ttlseconds = %d", zone.GC.TTLSeconds) + useComma = true + } + if zone.NumReplicas != nil { + writeComma(f, useComma) + f.Printf("\tnum_replicas = %d", *zone.NumReplicas) + useComma = true + } + if !zone.InheritedConstraints { + writeComma(f, useComma) + f.Printf("\tconstraints = %s", lex.EscapeSQLString(constraints)) + useComma = true + } + if !zone.InheritedLeasePreferences { + writeComma(f, useComma) + f.Printf("\tlease_preferences = %s", lex.EscapeSQLString(prefs)) + } + return f.String(), nil +} + // generateZoneConfigIntrospectionValues creates a result row // suitable for populating crdb_internal.zones or SHOW ZONE CONFIG. // The values are populated into the `values` first argument. // The caller is responsible for creating the DInt for the ID and // provide it as 2nd argument. The function will compute // the remaining values based on the zone specifier and configuration. +// The fullZoneConfig argument is a zone config populated with all +// inherited zone configuration information. If this argument is nil, +// then the zone argument is used to populate the full_config_sql and +// full_config_yaml columns. func generateZoneConfigIntrospectionValues( values tree.Datums, zoneID tree.Datum, subZoneID tree.Datum, zs *tree.ZoneSpecifier, zone *config.ZoneConfig, + fullZoneConfig *config.ZoneConfig, ) error { // Populate the ID column. values[zoneIDCol] = zoneID @@ -200,59 +260,17 @@ func generateZoneConfigIntrospectionValues( if err != nil { return err } - values[configYAMLCol] = tree.NewDString(string(yamlConfig)) + values[rawConfigYAMLCol] = tree.NewDString(string(yamlConfig)) // Populate the SQL column. 
if zs == nil { - values[configSQLCol] = tree.DNull + values[rawConfigSQLCol] = tree.DNull } else { - constraints, err := yamlMarshalFlow(config.ConstraintsList{ - Constraints: zone.Constraints, - Inherited: zone.InheritedConstraints}) + sqlStr, err := zoneConfigToSQL(zs, zone) if err != nil { return err } - constraints = strings.TrimSpace(constraints) - prefs, err := yamlMarshalFlow(zone.LeasePreferences) - if err != nil { - return err - } - prefs = strings.TrimSpace(prefs) - - useComma := false - f := tree.NewFmtCtx(tree.FmtParsable) - f.WriteString("ALTER ") - f.FormatNode(zs) - f.WriteString(" CONFIGURE ZONE USING\n") - if zone.RangeMinBytes != nil { - f.Printf("\trange_min_bytes = %d", *zone.RangeMinBytes) - useComma = true - } - if zone.RangeMaxBytes != nil { - writeComma(f, useComma) - f.Printf("\trange_max_bytes = %d", *zone.RangeMaxBytes) - useComma = true - } - if zone.GC != nil { - writeComma(f, useComma) - f.Printf("\tgc.ttlseconds = %d", zone.GC.TTLSeconds) - useComma = true - } - if zone.NumReplicas != nil { - writeComma(f, useComma) - f.Printf("\tnum_replicas = %d", *zone.NumReplicas) - useComma = true - } - if !zone.InheritedConstraints { - writeComma(f, useComma) - f.Printf("\tconstraints = %s", lex.EscapeSQLString(constraints)) - useComma = true - } - if !zone.InheritedLeasePreferences { - writeComma(f, useComma) - f.Printf("\tlease_preferences = %s", lex.EscapeSQLString(prefs)) - } - values[configSQLCol] = tree.NewDString(f.String()) + values[rawConfigSQLCol] = tree.NewDString(sqlStr) } // Populate the protobuf column. @@ -260,8 +278,29 @@ func generateZoneConfigIntrospectionValues( if err != nil { return err } - values[configProtobufCol] = tree.NewDBytes(tree.DBytes(protoConfig)) + values[rawConfigProtobufCol] = tree.NewDBytes(tree.DBytes(protoConfig)) + + // Populate the full_config_yaml and full_config_sql columns. 
+ inheritedConfig := fullZoneConfig + if inheritedConfig == nil { + inheritedConfig = zone + } + + yamlConfig, err = yaml.Marshal(inheritedConfig) + if err != nil { + return err + } + values[fullConfigYamlCol] = tree.NewDString(string(yamlConfig)) + if zs == nil { + values[fullConfigSQLCol] = tree.DNull + } else { + sqlStr, err := zoneConfigToSQL(zs, inheritedConfig) + if err != nil { + return err + } + values[fullConfigSQLCol] = tree.NewDString(sqlStr) + } return nil } diff --git a/pkg/storage/reports/constraint_report.go b/pkg/storage/reports/constraint_report.go index fae6df17e15c..077716e19ff8 100644 --- a/pkg/storage/reports/constraint_report.go +++ b/pkg/storage/reports/constraint_report.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -169,7 +170,7 @@ func (r *replicationConstraintStatsReportSaver) ensureEntries( } } for i, sz := range zone.Subzones { - szKey := ZoneKey{ZoneID: key.ZoneID, SubzoneID: SubzoneIDFromIndex(i)} + szKey := ZoneKey{ZoneID: key.ZoneID, SubzoneID: base.SubzoneIDFromIndex(i)} r.ensureEntries(szKey, &sz.Config) } } @@ -219,7 +220,7 @@ func (r *replicationConstraintStatsReportSaver) loadPreviousVersion( for _, row := range rows { key := ConstraintStatusKey{} key.ZoneID = (uint32)(*row[0].(*tree.DInt)) - key.SubzoneID = SubzoneID((*row[1].(*tree.DInt))) + key.SubzoneID = base.SubzoneID((*row[1].(*tree.DInt))) key.ViolationType = (ConstraintType)(*row[2].(*tree.DString)) key.Constraint = (ConstraintRepr)(*row[3].(*tree.DString)) r.previousVersion[key] = ConstraintStatus{(int)(*row[4].(*tree.DInt))} diff --git a/pkg/storage/reports/locality_report.go b/pkg/storage/reports/locality_report.go index 5adbcefb3ec8..df32dd091e28 100644 --- a/pkg/storage/reports/locality_report.go +++ b/pkg/storage/reports/locality_report.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -133,7 +134,7 @@ func (r *replicationCriticalLocalitiesReportSaver) loadPreviousVersion( for _, row := range rows { key := localityKey{} key.ZoneID = (uint32)(*row[0].(*tree.DInt)) - key.SubzoneID = SubzoneID(*row[1].(*tree.DInt)) + key.SubzoneID = base.SubzoneID(*row[1].(*tree.DInt)) key.locality = (LocalityRepr)(*row[2].(*tree.DString)) r.previousVersion[key] = localityStatus{(int32)(*row[3].(*tree.DInt))} } diff --git a/pkg/storage/reports/range_report.go b/pkg/storage/reports/range_report.go index 5cb417985c55..f32cb7280eb8 100644 --- a/pkg/storage/reports/range_report.go +++ b/pkg/storage/reports/range_report.go @@ -14,6 +14,7 @@ import ( "context" "time" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -136,7 +137,7 @@ func (r *replicationStatsReportSaver) loadPreviousVersion( for _, row := range rows { key := ZoneKey{} key.ZoneID = (uint32)(*row[0].(*tree.DInt)) - key.SubzoneID = SubzoneID(*row[1].(*tree.DInt)) + key.SubzoneID = base.SubzoneID(*row[1].(*tree.DInt)) r.previousVersion[key] = zoneRangeStatus{ (int32)(*row[2].(*tree.DInt)), (int32)(*row[3].(*tree.DInt)), @@ -322,7 +323,7 @@ func (v *replicationStatsVisitor) ensureEntries(key ZoneKey, zone *config.ZoneCo 
v.report.EnsureEntry(key) } for i, sz := range zone.Subzones { - v.ensureEntries(MakeZoneKey(key.ZoneID, SubzoneIDFromIndex(i)), &sz.Config) + v.ensureEntries(MakeZoneKey(key.ZoneID, base.SubzoneIDFromIndex(i)), &sz.Config) } } diff --git a/pkg/storage/reports/reporter.go b/pkg/storage/reports/reporter.go index 16b4e7e5b333..9503ad0d0ec5 100644 --- a/pkg/storage/reports/reporter.go +++ b/pkg/storage/reports/reporter.go @@ -14,6 +14,7 @@ import ( "context" "time" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" @@ -320,7 +321,7 @@ func visitZones( // Try subzones. subzone, subzoneIdx := zone.GetSubzoneForKeySuffix(keySuffix) if subzone != nil { - if visitor(ctx, &subzone.Config, MakeZoneKey(id, SubzoneIDFromIndex(int(subzoneIdx)))) { + if visitor(ctx, &subzone.Config, MakeZoneKey(id, base.SubzoneIDFromIndex(int(subzoneIdx)))) { return true, nil } } diff --git a/pkg/storage/reports/reporter_test.go b/pkg/storage/reports/reporter_test.go index d91cafd4535d..2a24b889f30e 100644 --- a/pkg/storage/reports/reporter_test.go +++ b/pkg/storage/reports/reporter_test.go @@ -38,7 +38,7 @@ import ( "github.com/cockroachdb/errors" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" ) type zone struct { @@ -704,7 +704,7 @@ func addIndexSubzones( Config: idx.zone.toZoneConfig(), }) objects[fmt.Sprintf("%s.%s", tableDesc.Name, idx.name)] = - MakeZoneKey(uint32(tableDesc.ID), SubzoneID(len(res.Subzones))) + MakeZoneKey(uint32(tableDesc.ID), base.SubzoneID(len(res.Subzones))) } for _, p := range idx.partitions { @@ -721,7 +721,7 @@ func addIndexSubzones( } else { objectName = fmt.Sprintf("%s.%s.%s", tableDesc.Name, idx.name, p.name) } - objects[objectName] = MakeZoneKey(uint32(tableDesc.ID), SubzoneID(len(res.Subzones))) + objects[objectName] = MakeZoneKey(uint32(tableDesc.ID), base.SubzoneID(len(res.Subzones))) } } return res diff --git a/pkg/storage/reports/zone_key.go b/pkg/storage/reports/zone_key.go index 7fdd8ce384d6..84ffd1f79bce 100644 --- a/pkg/storage/reports/zone_key.go +++ b/pkg/storage/reports/zone_key.go @@ -10,7 +10,11 @@ package reports -import "fmt" +import ( + "fmt" + + "github.com/cockroachdb/cockroach/pkg/base" +) // ZoneKey is the index of the first level in the constraint conformance report. type ZoneKey struct { @@ -19,29 +23,18 @@ type ZoneKey struct { // SubzoneID identifies what subzone, if any, this key is referencing. The // zero value (also named NoSubzone) indicates that the key is referring to a // zone, not a subzone. - SubzoneID SubzoneID + SubzoneID base.SubzoneID } // NoSubzone is used inside a zoneKey to indicate that the key represents a // zone, not a subzone. -const NoSubzone SubzoneID = 0 - -// SubzoneID represents a subzone within a zone. It's the subzone's index within -// the parent zone + 1; there's no subzone 0 so that 0 can be used as a -// sentinel. -type SubzoneID uint32 - -// SubzoneIDFromIndex turns a subzone's index within its parent zone into its -// SubzoneID. -func SubzoneIDFromIndex(idx int) SubzoneID { - return SubzoneID(idx + 1) -} +const NoSubzone base.SubzoneID = 0 // MakeZoneKey creates a zoneKey. // // Use NoSubzone for subzoneID to indicate that the key references a zone, not a // subzone. 
-func MakeZoneKey(zoneID uint32, subzoneID SubzoneID) ZoneKey {
+func MakeZoneKey(zoneID uint32, subzoneID base.SubzoneID) ZoneKey {
 	return ZoneKey{
 		ZoneID:    zoneID,
 		SubzoneID: subzoneID,
diff --git a/pkg/testutils/sqlutils/zone.go b/pkg/testutils/sqlutils/zone.go
index 7806ad4ce9c2..8540871accd1 100644
--- a/pkg/testutils/sqlutils/zone.go
+++ b/pkg/testutils/sqlutils/zone.go
@@ -83,7 +83,7 @@ func VerifyZoneConfigForTarget(t testing.TB, sqlDB *SQLRunner, target string, ro
 		t.Fatal(err)
 	}
 	sqlDB.CheckQueryResults(t, fmt.Sprintf(`
-SELECT zone_id, config_protobuf
+SELECT zone_id, raw_config_protobuf
 FROM [SHOW ZONE CONFIGURATION FOR %s]`, target),
 		[][]string{sqlRow})
 }
@@ -100,7 +100,7 @@ func VerifyAllZoneConfigs(t testing.TB, sqlDB *SQLRunner, rows ...ZoneRow) {
 			t.Fatal(err)
 		}
 	}
-	sqlDB.CheckQueryResults(t, `SELECT zone_id, config_protobuf FROM crdb_internal.zones`, expected)
+	sqlDB.CheckQueryResults(t, `SELECT zone_id, raw_config_protobuf FROM crdb_internal.zones`, expected)
 }

 // ZoneConfigExists returns whether a zone config with the provided name exists.

From e79fc165e68f63335cb2464210bb166b70743f95 Mon Sep 17 00:00:00 2001
From: David Taylor
Date: Mon, 30 Sep 2019 13:25:04 +0000
Subject: [PATCH 2/4] storage: log when AddSSTable requests are delayed

If the rate-limiting and back-pressure mechanisms kick in, they can
dramatically delay requests in some cases. However, there is currently
no clear indication that this is happening, and the system may simply
appear slow. Logging when requests are delayed by more than a second
should help identify when this is the cause of slowness.

Release note: none.

Release justification: low-risk (logging only) change that could
significantly help in diagnosing 'stuck' jobs based on logs (which are
often all we have to go on).
---
 pkg/storage/store_send.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/pkg/storage/store_send.go b/pkg/storage/store_send.go
index 81a496c95edb..5e320c53323e 100644
--- a/pkg/storage/store_send.go
+++ b/pkg/storage/store_send.go
@@ -12,12 +12,14 @@ package storage

 import (
 	"context"
+	"time"

 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb"
 	"github.com/cockroachdb/cockroach/pkg/storage/txnwait"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
+	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 	"github.com/pkg/errors"
 )

@@ -56,6 +58,7 @@ func (s *Store) Send(
 	// Limit the number of concurrent AddSSTable requests, since they're expensive
 	// and block all other writes to the same span.
 	if ba.IsSingleAddSSTableRequest() {
+		before := timeutil.Now()
 		if err := s.limiters.ConcurrentAddSSTableRequests.Begin(ctx); err != nil {
 			return nil, roachpb.NewError(err)
 		}
@@ -64,7 +67,12 @@
 		if err := s.limiters.AddSSTableRequestRate.Wait(ctx); err != nil {
 			return nil, roachpb.NewError(err)
 		}
+		beforeEngineDelay := timeutil.Now()
 		s.engine.PreIngestDelay(ctx)
+		if waited := timeutil.Since(before); waited > time.Second {
+			log.Infof(ctx, "SST ingestion was delayed by %v (%v for storage engine back-pressure)",
+				waited, timeutil.Since(beforeEngineDelay))
+		}
 	}

 	if err := ba.SetActiveTimestamp(s.Clock().Now); err != nil {

From ea0b46290773d34dd5e3949670b7a0d37307c5fa Mon Sep 17 00:00:00 2001
From: David Taylor
Date: Mon, 30 Sep 2019 13:29:48 +0000
Subject: [PATCH 3/4] bulk: add more timings to debugging logging

This tracks and logs time spent in the various stages of ingestion -
sorting, splitting and flushing.
This helps when trying to diagnose why a job is 'slow' or 'stuck'. Release note: none. Release justification: low-risk (logging only) changes that improve ability to diagnose problems. --- pkg/storage/bulk/buffering_adder.go | 29 +++++++++++++++++++++++++++-- pkg/storage/bulk/sst_batcher.go | 21 ++++++++++++++++----- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/pkg/storage/bulk/buffering_adder.go b/pkg/storage/bulk/buffering_adder.go index 9509c1c5441e..5526d19e6f1d 100644 --- a/pkg/storage/bulk/buffering_adder.go +++ b/pkg/storage/bulk/buffering_adder.go @@ -13,6 +13,7 @@ package bulk import ( "context" "sort" + "time" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -21,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/pkg/errors" ) @@ -47,6 +49,8 @@ type BufferingAdder struct { flushCounts struct { total int bufferSize int + totalSort time.Duration + totalFlush time.Duration } // name of the BufferingAdder for the purpose of logging only. @@ -208,9 +212,14 @@ func (b *BufferingAdder) Flush(ctx context.Context) error { before := b.sink.flushCounts beforeSize := b.sink.totalRows.DataSize + beforeSort := timeutil.Now() + sort.Sort(&b.curBuf) mvccKey := engine.MVCCKey{Timestamp: b.timestamp} + beforeFlush := timeutil.Now() + b.flushCounts.totalSort += beforeFlush.Sub(beforeSort) + for i := range b.curBuf.entries { mvccKey.Key = b.curBuf.Key(i) if err := b.sink.AddMVCCKey(ctx, mvccKey, b.curBuf.Value(i)); err != nil { @@ -220,6 +229,7 @@ func (b *BufferingAdder) Flush(ctx context.Context) error { if err := b.sink.Flush(ctx); err != nil { return err } + b.flushCounts.totalFlush += timeutil.Since(beforeFlush) if log.V(3) { written := b.sink.totalRows.DataSize - beforeSize @@ -228,8 +238,23 @@ func (b *BufferingAdder) Flush(ctx context.Context) error { dueToSize := b.sink.flushCounts.sstSize - before.sstSize log.Infof(ctx, - "flushing %s buffer wrote %d SSTs (avg: %s) with %d for splits, %d for size", - sz(b.curBuf.MemSize), files, sz(written/int64(files)), dueToSplits, dueToSize, + "flushing %s buffer wrote %d SSTs (avg: %s) with %d for splits, %d for size, took %v", + sz(b.curBuf.MemSize), files, sz(written/int64(files)), dueToSplits, dueToSize, timeutil.Since(beforeSort), + ) + } + if log.V(4) { + log.Infof(ctx, + "bulk adder %s has ingested %s, spent %v sorting and %v flushing (%v sending, %v splitting). Flushed %d times due to buffer (%s) size. Flushed chunked as %d files (%d after split-retries), %d due to ranges, %d due to sst size.", + b.name, + sz(b.sink.totalRows.DataSize), + b.flushCounts.totalSort, + b.flushCounts.totalFlush, + b.sink.flushCounts.sendWait, + b.sink.flushCounts.splitWait, + b.flushCounts.bufferSize, + sz(b.memAcc.Used()), + b.sink.flushCounts.total, b.sink.flushCounts.files, + b.sink.flushCounts.split, b.sink.flushCounts.sstSize, ) } if b.onFlush != nil { diff --git a/pkg/storage/bulk/sst_batcher.go b/pkg/storage/bulk/sst_batcher.go index c800803e83ae..81c9abdd5acc 100644 --- a/pkg/storage/bulk/sst_batcher.go +++ b/pkg/storage/bulk/sst_batcher.go @@ -69,6 +69,9 @@ type SSTBatcher struct { split int sstSize int files int // a single flush might create multiple files. + + sendWait time.Duration + splitWait time.Duration } // Tracking for if we have "filled" a range in case we want to split/scatter. 
flushedToCurrentRange uint64 @@ -270,10 +273,13 @@ func (b *SSTBatcher) doFlush(ctx context.Context, reason int, nextKey roachpb.Ke if (b.ms != enginepb.MVCCStats{}) { b.ms.LastUpdateNanos = timeutil.Now().UnixNano() } + + beforeSend := timeutil.Now() files, err := AddSSTable(ctx, b.db, start, end, sstBytes, b.disallowShadowing, b.ms) if err != nil { return err } + b.flushCounts.sendWait += timeutil.Since(beforeSend) b.flushCounts.files += files if b.flushKey != nil { @@ -291,6 +297,8 @@ func (b *SSTBatcher) doFlush(ctx context.Context, reason int, nextKey roachpb.Ke if splitAt, err := keys.EnsureSafeSplitKey(nextKey); err != nil { log.Warning(ctx, err) } else { + beforeSplit := timeutil.Now() + log.VEventf(ctx, 2, "%s added since last split, splitting/scattering for next range at %v", sz(b.flushedToCurrentRange), end) // NB: Passing 'hour' here is technically illegal until 19.2 is // active, but the value will be ignored before that, and we don't @@ -298,6 +306,7 @@ func (b *SSTBatcher) doFlush(ctx context.Context, reason int, nextKey roachpb.Ke if err := b.db.SplitAndScatter(ctx, splitAt, hour); err != nil { log.Warningf(ctx, "failed to split and scatter during ingest: %+v", err) } + b.flushCounts.splitWait += timeutil.Since(beforeSplit) } b.flushedToCurrentRange = 0 } @@ -344,7 +353,7 @@ func AddSSTable( ms enginepb.MVCCStats, ) (int, error) { var files int - now := timeutil.Now().UnixNano() + now := timeutil.Now() iter, err := engine.NewMemSSTIterator(sstBytes, true) if err != nil { return 0, err @@ -354,7 +363,7 @@ func AddSSTable( var stats enginepb.MVCCStats if (ms == enginepb.MVCCStats{}) { stats, err = engine.ComputeStatsGo( - iter, engine.MVCCKey{Key: start}, engine.MVCCKey{Key: end}, now, + iter, engine.MVCCKey{Key: start}, engine.MVCCKey{Key: end}, now.UnixNano(), ) if err != nil { return 0, errors.Wrapf(err, "computing stats for SST [%s, %s)", start, end) @@ -371,10 +380,12 @@ func AddSSTable( if err := func() error { var err error for i := 0; i < maxAddSSTableRetries; i++ { - log.VEventf(ctx, 2, "sending %s AddSSTable [%s,%s)", sz(len(item.sstBytes)), start, end) + log.VEventf(ctx, 2, "sending %s AddSSTable [%s,%s)", sz(len(item.sstBytes)), item.start, item.end) + before := timeutil.Now() // This will fail if the range has split but we'll check for that below. err = db.AddSSTable(ctx, item.start, item.end, item.sstBytes, item.disallowShadowing, &item.stats) if err == nil { + log.VEventf(ctx, 3, "adding %s AddSSTable [%s,%s) took %v", sz(len(item.sstBytes)), item.start, item.end, timeutil.Since(before)) return nil } // This range has split -- we need to split the SST to try again. @@ -387,7 +398,7 @@ func AddSSTable( } right.stats, err = engine.ComputeStatsGo( - iter, engine.MVCCKey{Key: right.start}, engine.MVCCKey{Key: right.end}, now, + iter, engine.MVCCKey{Key: right.start}, engine.MVCCKey{Key: right.end}, now.UnixNano(), ) if err != nil { return err @@ -414,7 +425,7 @@ func AddSSTable( // top level SST which is kept around to iterate over. item.sstBytes = nil } - + log.VEventf(ctx, 3, "AddSSTable [%v, %v) added %d files and took %v", start, end, files, timeutil.Since(now)) return files, nil } From 460f9e74c91037c0893e9cc2b1e542c7f081be53 Mon Sep 17 00:00:00 2001 From: Rohan Yadav Date: Thu, 26 Sep 2019 15:41:16 -0400 Subject: [PATCH 4/4] movr: Add stats collection to movr workload run This PR adds tracking stats for each kind of query in the movr workload so that output is displayed from cockroach workload run. 
Additionally, this refactors the movr workload to define the work as
functions on a worker struct. This will hopefully avoid a common gotcha
of having different workers share the same non-thread-safe histograms
object.

Release justification: low-risk, nice-to-have feature

Release note: None
---
 pkg/workload/movr/movr.go     | 201 ---------------------
 pkg/workload/movr/workload.go | 316 ++++++++++++++++++++++++++++++++++
 2 files changed, 316 insertions(+), 201 deletions(-)
 create mode 100644 pkg/workload/movr/workload.go

diff --git a/pkg/workload/movr/movr.go b/pkg/workload/movr/movr.go
index a5aef53195a6..01e7eaca690a 100644
--- a/pkg/workload/movr/movr.go
+++ b/pkg/workload/movr/movr.go
@@ -11,7 +11,6 @@
 package movr

 import (
-	"context"
 	gosql "database/sql"
 	"math"
 	"strings"
@@ -21,7 +20,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/util/uuid"
 	"github.com/cockroachdb/cockroach/pkg/workload"
 	"github.com/cockroachdb/cockroach/pkg/workload/faker"
-	"github.com/cockroachdb/cockroach/pkg/workload/histogram"
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"
 	"golang.org/x/exp/rand"
@@ -518,202 +516,3 @@ func (g *movr) movrPromoCodesInitialRow(rowIdx int) []interface{} {
 		rulesJSON, // rules
 	}
 }
-
-type rideInfo struct {
-	id   string
-	city string
-}
-
-// Ops implements the Opser interface
-func (g *movr) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) {
-	// Initialize the faker in case it hasn't been setup already.
-	g.fakerOnce.Do(func() {
-		g.faker = faker.NewFaker()
-	})
-	sqlDatabase, err := workload.SanitizeUrls(g, g.connFlags.DBOverride, urls)
-	if err != nil {
-		return workload.QueryLoad{}, err
-	}
-	db, err := gosql.Open(`postgres`, strings.Join(urls, ` `))
-	if err != nil {
-		return workload.QueryLoad{}, err
-	}
-
-	ql := workload.QueryLoad{SQLDatabase: sqlDatabase}
-
-	rng := rand.New(rand.NewSource(g.seed))
-	readPercentage := 0.90
-	activeRides := []rideInfo{}
-
-	getRandomUser := func(city string) (string, error) {
-		id, err := uuid.NewV4()
-		if err != nil {
-			return "", err
-		}
-		var user string
-		q := `
-		SELECT
-			IFNULL(a, b)
-		FROM
-			(
-				SELECT
-					(SELECT id FROM users WHERE city = $1 AND id > $2 ORDER BY id LIMIT 1)
-						AS a,
-					(SELECT id FROM users WHERE city = $1 ORDER BY id LIMIT 1) AS b
-			);
-		`
-		err = db.QueryRow(q, city, id.String()).Scan(&user)
-		return user, err
-	}
-
-	getRandomPromoCode := func() (string, error) {
-		id, err := uuid.NewV4()
-		if err != nil {
-			return "", err
-		}
-		q := `
-		SELECT
-			IFNULL(a, b)
-		FROM
-			(
-				SELECT
-					(SELECT code FROM promo_codes WHERE code > $1 ORDER BY code LIMIT 1)
-						AS a,
-					(SELECT code FROM promo_codes ORDER BY code LIMIT 1) AS b
-			);
-		`
-		var code string
-		err = db.QueryRow(q, id.String()).Scan(&code)
-		return code, err
-	}
-
-	getRandomVehicle := func(city string) (string, error) {
-		id, err := uuid.NewV4()
-		if err != nil {
-			return "", err
-		}
-		q := `
-		SELECT
-			IFNULL(a, b)
-		FROM
-			(
-				SELECT
-					(SELECT id FROM vehicles WHERE city = $1 AND id > $2 ORDER BY id LIMIT 1)
-						AS a,
-					(SELECT id FROM vehicles WHERE city = $1 ORDER BY id LIMIT 1) AS b
-			);
-		`
-		var vehicle string
-		err = db.QueryRow(q, city, id.String()).Scan(&vehicle)
-		return vehicle, err
-	}
-
-	movrQuerySimulation := func(ctx context.Context) error {
-		activeCity := randCity(rng)
-		if rng.Float64() <= readPercentage {
-			q := `SELECT city, id FROM vehicles WHERE city = $1`
-			_, err := db.Exec(q, activeCity)
-			return err
-		}
-		// Simulate vehicle location updates.
- for i, ride := range activeRides { - if i >= 10 { - break - } - lat, long := randLatLong(rng) - q := `UPSERT INTO vehicle_location_histories VALUES ($1, $2, now(), $3, $4)` - _, err := db.Exec(q, ride.city, ride.id, lat, long) - if err != nil { - return err - } - } - - id, err := uuid.NewV4() - if err != nil { - return err - } - - // Do write operations. - if rng.Float64() < 0.03 { - q := `INSERT INTO promo_codes VALUES ($1, NULL, NULL, NULL, NULL)` - _, err = db.Exec(q, id.String()) - return err - } else if rng.Float64() < 0.1 { - // Apply a promo code to an account. - user, err := getRandomUser(activeCity) - if err != nil { - return err - } - - code, err := getRandomPromoCode() - if err != nil { - return err - } - - // See if the promo code has been used. - var count int - q := `SELECT count(*) FROM user_promo_codes WHERE city = $1 AND user_id = $2 AND code = $3` - err = db.QueryRow(q, activeCity, user, code).Scan(&count) - if err != nil { - return err - } - - // If is has not been, apply the promo code. - if count == 0 { - q = `INSERT INTO user_promo_codes VALUES ($1, $2, $3, NULL, NULL)` - _, err = db.Exec(q, activeCity, user, code) - return err - } - return nil - } else if rng.Float64() < 0.3 { - q := `INSERT INTO users VALUES ($1, $2, NULL, NULL, NULL)` - _, err = db.Exec(q, id.String(), activeCity) - return err - } else if rng.Float64() < 0.1 { - // Simulate adding a new vehicle to the population. - ownerID, err := getRandomUser(activeCity) - if err != nil { - return err - } - - typ := randVehicleType(rng) - q := `INSERT INTO vehicles VALUES ($1, $2, $3, $4, NULL, NULL, NULL, NULL)` - _, err = db.Exec(q, id.String(), activeCity, typ, ownerID) - return err - } else if rng.Float64() < 0.5 { - // Simulate a user starting a ride. - rider, err := getRandomUser(activeCity) - if err != nil { - return err - } - - vehicle, err := getRandomVehicle(activeCity) - if err != nil { - return err - } - - q := `INSERT INTO rides VALUES ($1, $2, $2, $3, $4, $5, NULL, now(), NULL, NULL)` - _, err = db.Exec(q, id.String(), activeCity, rider, vehicle, g.faker.StreetAddress(rng)) - if err != nil { - return err - } - activeRides = append(activeRides, rideInfo{id.String(), activeCity}) - return err - } else { - // Simulate a ride ending. - if len(activeRides) > 1 { - ride := activeRides[0] - activeRides = activeRides[1:] - q := `UPDATE rides SET end_address = $3, end_time = now() WHERE city = $1 AND id = $2` - _, err := db.Exec(q, ride.city, ride.id, g.faker.StreetAddress(rng)) - return err - } - } - - return nil - } - - ql.WorkerFns = append(ql.WorkerFns, movrQuerySimulation) - - return ql, nil -} diff --git a/pkg/workload/movr/workload.go b/pkg/workload/movr/workload.go new file mode 100644 index 000000000000..299d4d0e0453 --- /dev/null +++ b/pkg/workload/movr/workload.go @@ -0,0 +1,316 @@ +// Copyright 2019 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package movr + +import ( + "context" + gosql "database/sql" + "strings" + + "github.com/cockroachdb/cockroach/pkg/util/timeutil" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/cockroachdb/cockroach/pkg/workload" + "github.com/cockroachdb/cockroach/pkg/workload/faker" + "github.com/cockroachdb/cockroach/pkg/workload/histogram" + "golang.org/x/exp/rand" +) + +type rideInfo struct { + id string + city string +} + +type movrWorker struct { + db *gosql.DB + hists *histogram.Histograms + activeRides []rideInfo + rng *rand.Rand + faker faker.Faker +} + +func (m *movrWorker) getRandomUser(city string) (string, error) { + id, err := uuid.NewV4() + if err != nil { + return "", err + } + var user string + q := ` + SELECT + IFNULL(a, b) + FROM + ( + SELECT + (SELECT id FROM users WHERE city = $1 AND id > $2 ORDER BY id LIMIT 1) + AS a, + (SELECT id FROM users WHERE city = $1 ORDER BY id LIMIT 1) AS b + ); + ` + err = m.db.QueryRow(q, city, id.String()).Scan(&user) + return user, err +} + +func (m *movrWorker) getRandomPromoCode() (string, error) { + id, err := uuid.NewV4() + if err != nil { + return "", err + } + q := ` + SELECT + IFNULL(a, b) + FROM + ( + SELECT + (SELECT code FROM promo_codes WHERE code > $1 ORDER BY code LIMIT 1) + AS a, + (SELECT code FROM promo_codes ORDER BY code LIMIT 1) AS b + ); + ` + var code string + err = m.db.QueryRow(q, id.String()).Scan(&code) + return code, err +} + +func (m *movrWorker) getRandomVehicle(city string) (string, error) { + id, err := uuid.NewV4() + if err != nil { + return "", err + } + q := ` + SELECT + IFNULL(a, b) + FROM + ( + SELECT + (SELECT id FROM vehicles WHERE city = $1 AND id > $2 ORDER BY id LIMIT 1) + AS a, + (SELECT id FROM vehicles WHERE city = $1 ORDER BY id LIMIT 1) AS b + ); + ` + var vehicle string + err = m.db.QueryRow(q, city, id.String()).Scan(&vehicle) + return vehicle, err +} + +func (m *movrWorker) readVehicles(city string) error { + q := `SELECT city, id FROM vehicles WHERE city = $1` + _, err := m.db.Exec(q, city) + return err +} + +func (m *movrWorker) updateActiveRides() error { + for i, ride := range m.activeRides { + if i >= 10 { + break + } + lat, long := randLatLong(m.rng) + q := `UPSERT INTO vehicle_location_histories VALUES ($1, $2, now(), $3, $4)` + _, err := m.db.Exec(q, ride.city, ride.id, lat, long) + if err != nil { + return err + } + } + return nil +} + +func (m *movrWorker) addUser(id uuid.UUID, city string) error { + q := `INSERT INTO users VALUES ($1, $2, NULL, NULL, NULL)` + _, err := m.db.Exec(q, id.String(), city) + return err +} + +func (m *movrWorker) createPromoCode(id uuid.UUID, _ string) error { + q := `INSERT INTO promo_codes VALUES ($1, NULL, NULL, NULL, NULL)` + _, err := m.db.Exec(q, id.String()) + return err +} + +func (m *movrWorker) applyPromoCode(id uuid.UUID, city string) error { + user, err := m.getRandomUser(city) + if err != nil { + return err + } + code, err := m.getRandomPromoCode() + if err != nil { + return err + } + // See if the promo code has been used. + var count int + q := `SELECT count(*) FROM user_promo_codes WHERE city = $1 AND user_id = $2 AND code = $3` + err = m.db.QueryRow(q, city, user, code).Scan(&count) + if err != nil { + return err + } + // If is has not been, apply the promo code. 
+ if count == 0 { + q = `INSERT INTO user_promo_codes VALUES ($1, $2, $3, NULL, NULL)` + _, err = m.db.Exec(q, city, user, code) + return err + } + return nil +} + +func (m *movrWorker) addVehicle(id uuid.UUID, city string) error { + ownerID, err := m.getRandomUser(city) + if err != nil { + return err + } + typ := randVehicleType(m.rng) + q := `INSERT INTO vehicles VALUES ($1, $2, $3, $4, NULL, NULL, NULL, NULL)` + _, err = m.db.Exec(q, id.String(), city, typ, ownerID) + return err +} + +func (m *movrWorker) startRide(id uuid.UUID, city string) error { + rider, err := m.getRandomUser(city) + if err != nil { + return err + } + vehicle, err := m.getRandomVehicle(city) + if err != nil { + return err + } + q := `INSERT INTO rides VALUES ($1, $2, $2, $3, $4, $5, NULL, now(), NULL, NULL)` + _, err = m.db.Exec(q, id.String(), city, rider, vehicle, m.faker.StreetAddress(m.rng)) + if err != nil { + return err + } + m.activeRides = append(m.activeRides, rideInfo{id.String(), city}) + return err +} + +func (m *movrWorker) endRide(id uuid.UUID, city string) error { + if len(m.activeRides) > 1 { + ride := m.activeRides[0] + m.activeRides = m.activeRides[1:] + q := `UPDATE rides SET end_address = $3, end_time = now() WHERE city = $1 AND id = $2` + _, err := m.db.Exec(q, ride.city, ride.id, m.faker.StreetAddress(m.rng)) + return err + } + return nil +} + +func (m *movrWorker) generateWorkSimulation() func(context.Context) error { + const readPercentage = 0.95 + movrWorkloadFns := []struct { + weight float32 + key string + work func(uuid.UUID, string) error + }{ + { + weight: 0.03, + key: "createPromoCode", + work: m.createPromoCode, + }, + { + weight: 0.1, + key: "applyPromoCode", + work: m.applyPromoCode, + }, + { + weight: 0.3, + key: "addUser", + work: m.addUser, + }, + { + weight: 0.1, + key: "addVehicle", + work: m.addVehicle, + }, + { + weight: 0.4, + key: "startRide", + work: m.startRide, + }, + { + weight: 0.07, + key: "endRide", + work: m.endRide, + }, + } + + sum := float32(0.0) + for _, s := range movrWorkloadFns { + sum += s.weight + } + + runAndRecord := func(key string, work func() error) error { + start := timeutil.Now() + err := work() + elapsed := timeutil.Since(start) + if err == nil { + m.hists.Get(key).Record(elapsed) + } + return err + } + + return func(ctx context.Context) error { + activeCity := randCity(m.rng) + id, err := uuid.NewV4() + if err != nil { + return err + } + // Our workload is as follows: with 95% chance, do a simple read operation. + // Else, update all active vehicle locations, then pick a random "write" operation + // weighted by the weights in movrWorkloadFns. + if m.rng.Float64() <= readPercentage { + return runAndRecord("readVehicles", func() error { + return m.readVehicles(activeCity) + }) + } + err = runAndRecord("updateActiveRides", func() error { + return m.updateActiveRides() + }) + if err != nil { + return err + } + randVal := m.rng.Float32() * sum + w := float32(0.0) + for _, s := range movrWorkloadFns { + w += s.weight + if w >= randVal { + return runAndRecord(s.key, func() error { + return s.work(id, activeCity) + }) + } + } + panic("unreachable") + } +} + +// Ops implements the Opser interface +func (g *movr) Ops(urls []string, reg *histogram.Registry) (workload.QueryLoad, error) { + // Initialize the faker in case it hasn't been setup already. 
+ g.fakerOnce.Do(func() { + g.faker = faker.NewFaker() + }) + sqlDatabase, err := workload.SanitizeUrls(g, g.connFlags.DBOverride, urls) + if err != nil { + return workload.QueryLoad{}, err + } + db, err := gosql.Open(`postgres`, strings.Join(urls, ` `)) + if err != nil { + return workload.QueryLoad{}, err + } + + ql := workload.QueryLoad{SQLDatabase: sqlDatabase} + + worker := movrWorker{ + db: db, + rng: rand.New(rand.NewSource(g.seed)), + faker: g.faker, + activeRides: []rideInfo{}, + hists: reg.GetHandle(), + } + + ql.WorkerFns = append(ql.WorkerFns, worker.generateWorkSimulation()) + + return ql, nil +}
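The following is an illustrative, self-contained Go sketch (not part of the
patches above) of the pattern the new movrWorker.generateWorkSimulation uses:
pick a write operation by cumulative weight, time it, and record its latency
under the operation's key only when it succeeds. It deliberately avoids the
workload/histogram packages -- a plain map of durations stands in for the
per-operation histograms -- and the operation names, weights, and sleeps are
placeholders rather than the real movr values.

// weighted_ops_sketch.go: standalone illustration of weighted operation
// selection plus per-operation latency recording. Placeholder names:
// weightedOp, pickOp, runAndRecord, and the ops table in main.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

type weightedOp struct {
	weight float32
	key    string
	work   func() error
}

// recordedLatencies stands in for the per-operation histograms the worker keeps.
var recordedLatencies = map[string][]time.Duration{}

// runAndRecord times work() and records the latency only on success,
// so failed operations do not contribute latency samples.
func runAndRecord(key string, work func() error) error {
	start := time.Now()
	err := work()
	if err == nil {
		recordedLatencies[key] = append(recordedLatencies[key], time.Since(start))
	}
	return err
}

// pickOp walks the cumulative weights until it passes a random target,
// the same selection loop the worker's simulation function performs.
func pickOp(rng *rand.Rand, ops []weightedOp) weightedOp {
	sum := float32(0)
	for _, o := range ops {
		sum += o.weight
	}
	target := rng.Float32() * sum
	acc := float32(0)
	for _, o := range ops {
		acc += o.weight
		if acc >= target {
			return o
		}
	}
	return ops[len(ops)-1] // guard against float rounding
}

func main() {
	rng := rand.New(rand.NewSource(1))
	ops := []weightedOp{
		{weight: 0.3, key: "addUser", work: func() error { time.Sleep(time.Millisecond); return nil }},
		{weight: 0.4, key: "startRide", work: func() error { time.Sleep(2 * time.Millisecond); return nil }},
		{weight: 0.07, key: "endRide", work: func() error { return nil }},
	}
	for i := 0; i < 20; i++ {
		op := pickOp(rng, ops)
		_ = runAndRecord(op.key, op.work)
	}
	for key, samples := range recordedLatencies {
		fmt.Printf("%s: %d samples\n", key, len(samples))
	}
}

Recording only successful operations keeps failures from skewing the
per-operation latency numbers, which matches how runAndRecord in the patch
handles errors.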