diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
index 5056e46d895..ce0ba65f74f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
@@ -149,6 +149,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
index 3597b37ede7..57413f649e1 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
@@ -149,6 +149,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
index a5322578981..e1f949df6f8 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
@@ -149,6 +149,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
index 12bde509048..18090dd2430 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
@@ -149,6 +149,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
index e452d28606a..2053a75db4c 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
@@ -149,6 +149,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
index a38506ddd7d..6f502c94d32 100644
--- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_across_db_versions | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml
index 283d0451a34..120cf541305 100644
--- a/.github/workflows/cluster_endtoend_vreplication_basic.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
index b316c075614..760912b4818 100644
--- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_cellalias | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml b/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml
index 48f506709e7..d944a253ce3 100644
--- a/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_copy_parallel | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
index a1293e3688e..5bba7760d7e 100644
--- a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_foreign_key_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml b/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml
index 2013d89a83d..1184fe493ef 100644
--- a/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_mariadb_to_mysql | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate.yml b/.github/workflows/cluster_endtoend_vreplication_migrate.yml
index e7a8a8cb5ce..009840800d2 100644
--- a/.github/workflows/cluster_endtoend_vreplication_migrate.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_migrate.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_migrate | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml b/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml
index c0c80a8dc61..9a5935a8907 100644
--- a/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_multi_tenant | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
index 0b80b82fd7f..0a10e37e6c4 100644
--- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_and_materialize | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml
index fcbe7057b4c..f023bf9718b 100644
--- a/.github/workflows/cluster_endtoend_vreplication_v2.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_v2 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/cluster_endtoend_vreplication_vtctldclient_vdiff2_movetables_tz.yml b/.github/workflows/cluster_endtoend_vreplication_vtctldclient_vdiff2_movetables_tz.yml
index 40f2002f9a3..7d96ac60306 100644
--- a/.github/workflows/cluster_endtoend_vreplication_vtctldclient_vdiff2_movetables_tz.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_vtctldclient_vdiff2_movetables_tz.yml
@@ -166,6 +166,10 @@ jobs:
binlog-transaction-compression=ON
EOF
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_vtctldclient_vdiff2_movetables_tz | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml
index fc30974212b..c971466b998 100644
--- a/.github/workflows/codecov.yml
+++ b/.github/workflows/codecov.yml
@@ -111,7 +111,7 @@ jobs:
- name: Upload coverage reports to codecov.io
if: steps.changes.outputs.changed_files == 'true'
- uses: codecov/codecov-action@v4
+ uses: codecov/codecov-action@015f24e6818733317a2da2edd6290ab26238649a # https://github.com/codecov/codecov-action/releases/tag/v5.0.7
with:
fail_ci_if_error: true
verbose: true
diff --git a/.github/workflows/unit_test_evalengine_mysql57.yml b/.github/workflows/unit_test_evalengine_mysql57.yml
index ecc366b38fe..d55b2732c86 100644
--- a/.github/workflows/unit_test_evalengine_mysql57.yml
+++ b/.github/workflows/unit_test_evalengine_mysql57.yml
@@ -163,6 +163,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="1"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="mysql57"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/unit_test_evalengine_mysql80.yml b/.github/workflows/unit_test_evalengine_mysql80.yml
index e6e802b52d8..96af579742e 100644
--- a/.github/workflows/unit_test_evalengine_mysql80.yml
+++ b/.github/workflows/unit_test_evalengine_mysql80.yml
@@ -153,6 +153,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="1"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="mysql80"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/unit_test_evalengine_mysql84.yml b/.github/workflows/unit_test_evalengine_mysql84.yml
index 46736dac349..efbe2b0eb9f 100644
--- a/.github/workflows/unit_test_evalengine_mysql84.yml
+++ b/.github/workflows/unit_test_evalengine_mysql84.yml
@@ -153,6 +153,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="1"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="mysql84"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml
index 3eaf02d1538..eed08e9ce4c 100644
--- a/.github/workflows/unit_test_mysql57.yml
+++ b/.github/workflows/unit_test_mysql57.yml
@@ -163,6 +163,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="0"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="mysql57"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml
index c036e6dd477..9e0ed7e6977 100644
--- a/.github/workflows/unit_test_mysql80.yml
+++ b/.github/workflows/unit_test_mysql80.yml
@@ -153,6 +153,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="0"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="mysql80"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/.github/workflows/unit_test_mysql84.yml b/.github/workflows/unit_test_mysql84.yml
index 84447ce390b..5948eb0836a 100644
--- a/.github/workflows/unit_test_mysql84.yml
+++ b/.github/workflows/unit_test_mysql84.yml
@@ -153,6 +153,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="0"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="mysql84"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/changelog/22.0/22.0.0/summary.md b/changelog/22.0/22.0.0/summary.md
index cb8372cd60e..3f63b2d868f 100644
--- a/changelog/22.0/22.0.0/summary.md
+++ b/changelog/22.0/22.0.0/summary.md
@@ -8,6 +8,7 @@
- **[RPC Changes](#rpc-changes)**
- **[Prefer not promoting a replica that is currently taking a backup](#reparents-prefer-not-backing-up)**
- **[VTOrc Config File Changes](#vtorc-config-file-changes)**
+ - **[Support for More Efficient JSON Replication](#efficient-json-replication)**
- **[Minor Changes](#minor-changes)**
- **[VTTablet Flags](#flags-vttablet)**
- **[Topology read concurrency behaviour changes](#topo-read-concurrency-changes)**
@@ -59,6 +60,12 @@ The following fields can be dynamically changed -
To upgrade to the newer version of the configuration file, first switch to using the flags in your current deployment before upgrading. Then you can switch to using the configuration file in the newer release.
+### Support for More Efficient JSON Replication
+
+In [#17345](https://github.com/vitessio/vitess/pull/17345) we added support for [`--binlog-row-value-options=PARTIAL_JSON`](https://dev.mysql.com/doc/refman/en/replication-options-binary-log.html#sysvar_binlog_row_value_options). You can read more about [this MySQL 8.0 feature here](https://dev.mysql.com/blog-archive/efficient-json-replication-in-mysql-8-0/).
+
+If you are running MySQL 8.0 or later and use JSON columns, you can now enable this MySQL feature across your Vitess cluster(s) to lower the disk space needed for binary logs and to reduce CPU and memory usage in both `mysqld` (standard intrashard MySQL replication) and `vttablet` ([VReplication](https://vitess.io/docs/reference/vreplication/vreplication/)), without losing any capabilities or features.
+
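+For example, enabling it on each `mysqld` instance is a one-line my.cnf change (a minimal sketch mirroring the mycnf additions this PR makes to the CI workflows; the `binlog_row_value_options` system variable is available as of MySQL 8.0.3):
+
+```ini
+[mysqld]
+binlog-row-value-options=PARTIAL_JSON
+```
+
+You can confirm the setting with `SELECT @@binlog_row_value_options;`.
+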
## Minor Changes
diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml
index 4b1b64df1ac..ded85de5285 100644
--- a/examples/operator/operator.yaml
+++ b/examples/operator/operator.yaml
@@ -679,6 +679,9 @@ spec:
maxLength: 256
pattern: ^[^\r\n]*$
type: string
+ minPartSize:
+ format: int64
+ type: integer
region:
minLength: 1
type: string
@@ -1995,6 +1998,9 @@ spec:
maxLength: 256
pattern: ^[^\r\n]*$
type: string
+ minPartSize:
+ format: int64
+ type: integer
region:
minLength: 1
type: string
@@ -3510,6 +3516,14 @@ spec:
mysql80Compatible:
type: string
type: object
+ mysqldExporter:
+ type: string
+ vtbackup:
+ type: string
+ vtorc:
+ type: string
+ vttablet:
+ type: string
type: object
name:
maxLength: 63
@@ -5241,6 +5255,9 @@ spec:
maxLength: 256
pattern: ^[^\r\n]*$
type: string
+ minPartSize:
+ format: int64
+ type: integer
region:
minLength: 1
type: string
@@ -6688,6 +6705,9 @@ spec:
maxLength: 256
pattern: ^[^\r\n]*$
type: string
+ minPartSize:
+ format: int64
+ type: integer
region:
minLength: 1
type: string
diff --git a/go.mod b/go.mod
index 6fd800b80ab..603bc37c62e 100644
--- a/go.mod
+++ b/go.mod
@@ -68,7 +68,7 @@ require (
go.uber.org/mock v0.2.0
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/mod v0.22.0 // indirect
- golang.org/x/net v0.31.0
+ golang.org/x/net v0.33.0
golang.org/x/oauth2 v0.24.0
golang.org/x/sys v0.28.0
golang.org/x/term v0.27.0
@@ -96,6 +96,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3
github.com/aws/smithy-go v1.22.0
github.com/bndr/gotabulate v1.1.2
+ github.com/dustin/go-humanize v1.0.1
github.com/gammazero/deque v0.2.1
github.com/google/safehtml v0.1.0
github.com/hashicorp/go-version v1.7.0
@@ -153,7 +154,6 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/dustin/go-humanize v1.0.1 // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.8.1 // indirect
github.com/envoyproxy/go-control-plane v0.13.1 // indirect
diff --git a/go.sum b/go.sum
index 3a7c5e07599..faf1e24be13 100644
--- a/go.sum
+++ b/go.sum
@@ -660,8 +660,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
-golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
diff --git a/go/cmd/internal/docgen/docgen.go b/go/cmd/internal/docgen/docgen.go
index eea935ed396..f6c7e6b098d 100644
--- a/go/cmd/internal/docgen/docgen.go
+++ b/go/cmd/internal/docgen/docgen.go
@@ -116,7 +116,6 @@ func restructure(rootDir string, dir string, name string, commands []*cobra.Comm
fullCmdFilename := strings.Join([]string{name, cmd.Name()}, "_")
children := cmd.Commands()
-
switch {
case len(children) > 0:
// Command (top-level or not) with children.
@@ -151,7 +150,6 @@ func restructure(rootDir string, dir string, name string, commands []*cobra.Comm
oldName := filepath.Join(rootDir, fullCmdFilename+".md")
newName := filepath.Join(dir, fullCmdFilename+".md")
-
if err := os.Rename(oldName, newName); err != nil {
return fmt.Errorf("failed to move child command %s to its parent's dir: %w", fullCmdFilename, err)
}
@@ -166,6 +164,14 @@ func restructure(rootDir string, dir string, name string, commands []*cobra.Comm
}
default:
// Top-level command without children. Nothing to restructure.
+ // However we still need to anonymize the homedir in the help text.
+ if cmd.Name() == "help" {
+ // all commands with children have their own "help" subcommand,
+ // which we do not generate docs for
+ continue
+ }
+ f := filepath.Join(dir, fullCmdFilename+".md")
+ _ = anonymizeHomedir(f) // it is possible that the file does not exist, so we ignore the error
continue
}
}
@@ -190,11 +196,14 @@ func anonymizeHomedir(file string) (err error) {
if err != nil {
return err
}
+ if _, err := os.Stat(file); err != nil {
+ return nil
+ }
// We're replacing the stuff inside the square brackets in the example sed
// below:
// 's:Paths to search for config files in. (default \[.*\])$:Paths to search for config files in. (default \[\]):'
- sed := exec.Command("sed", "-i", "-e", fmt.Sprintf("s:%s::i", wd), file)
+ sed := exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:%s:%s:", wd, ""), file)
if out, err := sed.CombinedOutput(); err != nil {
return fmt.Errorf("%w: %s", err, out)
}
@@ -224,7 +233,6 @@ func getCommitID(ref string) (string, error) {
const frontmatter = `---
title: %s
series: %s
-commit: %s
---
`
@@ -240,7 +248,7 @@ func frontmatterFilePrepender(sha string) func(filename string) string {
cmdName = strings.ReplaceAll(cmdName, "_", " ")
- return fmt.Sprintf(frontmatter, cmdName, root, sha)
+ return fmt.Sprintf(frontmatter, cmdName, root)
}
}
diff --git a/go/cmd/internal/docgen/docgen_test.go b/go/cmd/internal/docgen/docgen_test.go
index 2370727cde5..741f5ecc577 100644
--- a/go/cmd/internal/docgen/docgen_test.go
+++ b/go/cmd/internal/docgen/docgen_test.go
@@ -41,7 +41,7 @@ func TestGenerateMarkdownTree(t *testing.T) {
name: "current dir",
dir: "./",
cmd: &cobra.Command{},
- expectErr: false,
+ expectErr: true,
},
{
name: "Permission denied",
diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
index 54b2eec0840..38b0521f142 100644
--- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
+++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
@@ -17,13 +17,11 @@ limitations under the License.
package vdiff
import (
- "encoding/json"
"fmt"
"html/template"
"io"
"math"
"reflect"
- "sort"
"strings"
"time"
@@ -579,7 +577,7 @@ func buildRecentListings(resp *vtctldatapb.VDiffShowResponse) ([]*listing, error
func displayShowSingleSummary(out io.Writer, format, keyspace, workflowName, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (vdiff.VDiffState, error) {
state := vdiff.UnknownState
var output string
- summary, err := buildSingleSummary(keyspace, workflowName, uuid, resp, verbose)
+ summary, err := workflow.BuildSummary(keyspace, workflowName, uuid, resp, verbose)
if err != nil {
return state, err
}
@@ -616,225 +614,6 @@ func displayShowSingleSummary(out io.Writer, format, keyspace, workflowName, uui
return state, nil
}
-func buildSingleSummary(keyspace, workflow, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (*summary, error) {
- summary := &summary{
- Workflow: workflow,
- Keyspace: keyspace,
- UUID: uuid,
- State: vdiff.UnknownState,
- RowsCompared: 0,
- StartedAt: "",
- CompletedAt: "",
- HasMismatch: false,
- Shards: "",
- Reports: make(map[string]map[string]vdiff.DiffReport),
- Errors: make(map[string]string),
- Progress: nil,
- }
-
- var tableSummaryMap map[string]tableSummary
- var reports map[string]map[string]vdiff.DiffReport
- // Keep a tally of the states across all tables in all shards.
- tableStateCounts := map[vdiff.VDiffState]int{
- vdiff.UnknownState: 0,
- vdiff.PendingState: 0,
- vdiff.StartedState: 0,
- vdiff.StoppedState: 0,
- vdiff.ErrorState: 0,
- vdiff.CompletedState: 0,
- }
- // Keep a tally of the summary states across all shards.
- shardStateCounts := map[vdiff.VDiffState]int{
- vdiff.UnknownState: 0,
- vdiff.PendingState: 0,
- vdiff.StartedState: 0,
- vdiff.StoppedState: 0,
- vdiff.ErrorState: 0,
- vdiff.CompletedState: 0,
- }
- // Keep a tally of the approximate total rows to process as we'll use this for our progress
- // report.
- totalRowsToCompare := int64(0)
- var shards []string
- for shard, resp := range resp.TabletResponses {
- first := true
- if resp != nil && resp.Output != nil {
- shards = append(shards, shard)
- qr := sqltypes.Proto3ToResult(resp.Output)
- if tableSummaryMap == nil {
- tableSummaryMap = make(map[string]tableSummary, 0)
- reports = make(map[string]map[string]vdiff.DiffReport, 0)
- }
- for _, row := range qr.Named().Rows {
- // Update the global VDiff summary based on the per shard level summary.
- // Since these values will be the same for all subsequent rows we only use
- // the first row.
- if first {
- first = false
- // Our timestamps are strings in `2022-06-26 20:43:25` format so we sort
- // them lexicographically.
- // We should use the earliest started_at across all shards.
- if sa := row.AsString("started_at", ""); summary.StartedAt == "" || sa < summary.StartedAt {
- summary.StartedAt = sa
- }
- // And we should use the latest completed_at across all shards.
- if ca := row.AsString("completed_at", ""); summary.CompletedAt == "" || ca > summary.CompletedAt {
- summary.CompletedAt = ca
- }
- // If we had an error on the shard, then let's add that to the summary.
- if le := row.AsString("last_error", ""); le != "" {
- summary.Errors[shard] = le
- }
- // Keep track of how many shards are marked as a specific state. We check
- // this combined with the shard.table states to determine the VDiff summary
- // state.
- shardStateCounts[vdiff.VDiffState(strings.ToLower(row.AsString("vdiff_state", "")))]++
- }
-
- // Global VDiff summary updates that take into account the per table details
- // per shard.
- {
- summary.RowsCompared += row.AsInt64("rows_compared", 0)
- totalRowsToCompare += row.AsInt64("table_rows", 0)
-
- // If we had a mismatch on any table on any shard then the global VDiff
- // summary does too.
- if mm, _ := row.ToBool("has_mismatch"); mm {
- summary.HasMismatch = true
- }
- }
-
- // Table summary information that must be accounted for across all shards.
- {
- table := row.AsString("table_name", "")
- if table == "" { // This occurs when the table diff has not started on 1 or more shards
- continue
- }
- // Create the global VDiff table summary object if it doesn't exist.
- if _, ok := tableSummaryMap[table]; !ok {
- tableSummaryMap[table] = tableSummary{
- TableName: table,
- State: vdiff.UnknownState,
- }
-
- }
- ts := tableSummaryMap[table]
- // This is the shard level VDiff table state.
- sts := vdiff.VDiffState(strings.ToLower(row.AsString("table_state", "")))
- tableStateCounts[sts]++
-
- // The error state must be sticky, and we should not override any other
- // known state with completed.
- switch sts {
- case vdiff.CompletedState:
- if ts.State == vdiff.UnknownState {
- ts.State = sts
- }
- case vdiff.ErrorState:
- ts.State = sts
- default:
- if ts.State != vdiff.ErrorState {
- ts.State = sts
- }
- }
-
- diffReport := row.AsString("report", "")
- dr := vdiff.DiffReport{}
- if diffReport != "" {
- err := json.Unmarshal([]byte(diffReport), &dr)
- if err != nil {
- return nil, err
- }
- ts.RowsCompared += dr.ProcessedRows
- ts.MismatchedRows += dr.MismatchedRows
- ts.MatchingRows += dr.MatchingRows
- ts.ExtraRowsTarget += dr.ExtraRowsTarget
- ts.ExtraRowsSource += dr.ExtraRowsSource
- }
- if _, ok := reports[table]; !ok {
- reports[table] = make(map[string]vdiff.DiffReport)
- }
-
- reports[table][shard] = dr
- tableSummaryMap[table] = ts
- }
- }
- }
- }
-
- // The global VDiff summary should progress from pending->started->completed with
- // stopped for any shard and error for any table being sticky for the global summary.
- // We should only consider the VDiff to be complete if it's completed for every table
- // on every shard.
- if shardStateCounts[vdiff.StoppedState] > 0 {
- summary.State = vdiff.StoppedState
- } else if shardStateCounts[vdiff.ErrorState] > 0 || tableStateCounts[vdiff.ErrorState] > 0 {
- summary.State = vdiff.ErrorState
- } else if tableStateCounts[vdiff.StartedState] > 0 {
- summary.State = vdiff.StartedState
- } else if tableStateCounts[vdiff.PendingState] > 0 {
- summary.State = vdiff.PendingState
- } else if tableStateCounts[vdiff.CompletedState] == (len(tableSummaryMap) * len(shards)) {
- // When doing shard consolidations/merges, we cannot rely solely on the
- // vdiff_table state as there are N sources that we process rows from sequentially
- // with each one writing to the shared _vt.vdiff_table record for the target shard.
- // So we only mark the vdiff for the shard as completed when we've finished
- // processing rows from all of the sources -- which is recorded by marking the
- // vdiff done for the shard by setting _vt.vdiff.state = completed.
- if shardStateCounts[vdiff.CompletedState] == len(shards) {
- summary.State = vdiff.CompletedState
- } else {
- summary.State = vdiff.StartedState
- }
- } else {
- summary.State = vdiff.UnknownState
- }
-
- // If the vdiff has been started then we can calculate the progress.
- if summary.State == vdiff.StartedState {
- summary.Progress = BuildProgressReport(summary.RowsCompared, totalRowsToCompare, summary.StartedAt)
- }
-
- sort.Strings(shards) // Sort for predictable output
- summary.Shards = strings.Join(shards, ",")
- summary.TableSummaryMap = tableSummaryMap
- summary.Reports = reports
- if !summary.HasMismatch && !verbose {
- summary.Reports = nil
- summary.TableSummaryMap = nil
- }
- // If we haven't completed the global VDiff then be sure to reflect that with no
- // CompletedAt value.
- if summary.State != vdiff.CompletedState {
- summary.CompletedAt = ""
- }
- return summary, nil
-}
-
-func BuildProgressReport(rowsCompared int64, rowsToCompare int64, startedAt string) *vdiff.ProgressReport {
- report := &vdiff.ProgressReport{}
- if rowsCompared >= 1 {
- // Round to 2 decimal points.
- report.Percentage = math.Round(math.Min((float64(rowsCompared)/float64(rowsToCompare))*100, 100.00)*100) / 100
- }
- if math.IsNaN(report.Percentage) {
- report.Percentage = 0
- }
- pctToGo := math.Abs(report.Percentage - 100.00)
- startTime, _ := time.Parse(vdiff.TimestampFormat, startedAt)
- curTime := time.Now().UTC()
- runTime := curTime.Unix() - startTime.Unix()
- if report.Percentage >= 1 {
- // Calculate how long 1% took, on avg, and multiply that by the % left.
- eta := time.Unix(((int64(runTime)/int64(report.Percentage))*int64(pctToGo))+curTime.Unix(), 1).UTC()
- // Cap the ETA at 1 year out to prevent providing nonsensical ETAs.
- if eta.Before(time.Now().UTC().AddDate(1, 0, 0)) {
- report.ETA = eta.Format(vdiff.TimestampFormat)
- }
- }
- return report
-}
-
func commandShow(cmd *cobra.Command, args []string) error {
format, err := common.GetOutputFormat(cmd)
if err != nil {
diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go
index 8fbff03433d..e27c57f47be 100644
--- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go
+++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go
@@ -19,7 +19,6 @@ package vdiff
import (
"context"
"fmt"
- "math"
"testing"
"time"
@@ -690,112 +689,3 @@ func TestGetStructNames(t *testing.T) {
want := []string{"A", "B"}
require.EqualValues(t, want, got)
}
-
-func TestBuildProgressReport(t *testing.T) {
- now := time.Now()
- type args struct {
- summary *summary
- rowsToCompare int64
- }
- tests := []struct {
- name string
- args args
- want *vdiff.ProgressReport
- }{
- {
- name: "no progress",
- args: args{
- summary: &summary{RowsCompared: 0},
- rowsToCompare: 100,
- },
- want: &vdiff.ProgressReport{
- Percentage: 0,
- ETA: "", // no ETA
- },
- },
- {
- name: "one third of the way",
- args: args{
- summary: &summary{
- RowsCompared: 33,
- StartedAt: now.Add(-10 * time.Second).UTC().Format(vdiff.TimestampFormat),
- },
- rowsToCompare: 100,
- },
- want: &vdiff.ProgressReport{
- Percentage: 33,
- ETA: now.Add(20 * time.Second).UTC().Format(vdiff.TimestampFormat),
- },
- },
- {
- name: "half way",
- args: args{
- summary: &summary{
- RowsCompared: 5000000000,
- StartedAt: now.Add(-10 * time.Hour).UTC().Format(vdiff.TimestampFormat),
- },
- rowsToCompare: 10000000000,
- },
- want: &vdiff.ProgressReport{
- Percentage: 50,
- ETA: now.Add(10 * time.Hour).UTC().Format(vdiff.TimestampFormat),
- },
- },
- {
- name: "full progress",
- args: args{
- summary: &summary{
- RowsCompared: 100,
- CompletedAt: now.UTC().Format(vdiff.TimestampFormat),
- },
- rowsToCompare: 100,
- },
- want: &vdiff.ProgressReport{
- Percentage: 100,
- ETA: now.UTC().Format(vdiff.TimestampFormat),
- },
- },
- {
- name: "more than in I_S",
- args: args{
- summary: &summary{
- RowsCompared: 100,
- CompletedAt: now.UTC().Format(vdiff.TimestampFormat),
- },
- rowsToCompare: 50,
- },
- want: &vdiff.ProgressReport{
- Percentage: 100,
- ETA: now.UTC().Format(vdiff.TimestampFormat),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- tt.args.summary.Progress = BuildProgressReport(tt.args.summary.RowsCompared, tt.args.rowsToCompare, tt.args.summary.StartedAt)
- // We always check the percentage
- require.Equal(t, int(tt.want.Percentage), int(tt.args.summary.Progress.Percentage))
-
- // We only check the ETA if there is one.
- if tt.want.ETA != "" {
- // Let's check that we're within 1 second to avoid flakes.
- wantTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA)
- require.NoError(t, err)
- var timeDiff float64
- if tt.want.Percentage == 100 {
- completedTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.CompletedAt)
- require.NoError(t, err)
- timeDiff = math.Abs(completedTime.Sub(wantTime).Seconds())
- } else {
- startTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.StartedAt)
- require.NoError(t, err)
- completedTimeUnix := float64(now.UTC().Unix()-startTime.UTC().Unix()) * (100 / tt.want.Percentage)
- estimatedTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA)
- require.NoError(t, err)
- timeDiff = math.Abs(estimatedTime.Sub(startTime).Seconds() - completedTimeUnix)
- }
- require.LessOrEqual(t, timeDiff, 1.0)
- }
- })
- }
-}
diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt
index b4405960711..7bda9048211 100644
--- a/go/flags/endtoend/vtbackup.txt
+++ b/go/flags/endtoend/vtbackup.txt
@@ -195,6 +195,7 @@ Flags:
--remote_operation_timeout duration time to wait for a remote operation (default 15s)
--restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
+ --s3_backup_aws_min_partsize int Minimum part size to use, defaults to 5MiB but can be increased due to the dataset size. (default 5242880)
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
--s3_backup_aws_retries int AWS request retries. (default -1)
--s3_backup_force_path_style force the s3 path style.
diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt
index be0f5114e79..c84c5fadf5f 100644
--- a/go/flags/endtoend/vtctld.txt
+++ b/go/flags/endtoend/vtctld.txt
@@ -110,6 +110,7 @@ Flags:
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--remote_operation_timeout duration time to wait for a remote operation (default 15s)
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
+ --s3_backup_aws_min_partsize int Minimum part size to use, defaults to 5MiB but can be increased due to the dataset size. (default 5242880)
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
--s3_backup_aws_retries int AWS request retries. (default -1)
--s3_backup_force_path_style force the s3 path style.
diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt
index 48776d0e8e0..bc647fb5347 100644
--- a/go/flags/endtoend/vttablet.txt
+++ b/go/flags/endtoend/vttablet.txt
@@ -303,6 +303,7 @@ Flags:
--restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
--retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
--s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided).
+ --s3_backup_aws_min_partsize int Minimum part size to use, defaults to 5MiB but can be increased due to the dataset size. (default 5242880)
--s3_backup_aws_region string AWS region to use. (default "us-east-1")
--s3_backup_aws_retries int AWS request retries. (default -1)
--s3_backup_force_path_style force the s3 path style.
diff --git a/go/mysql/binlog/binlog_json.go b/go/mysql/binlog/binlog_json.go
index 03bf604fb2d..51b4fef0ef8 100644
--- a/go/mysql/binlog/binlog_json.go
+++ b/go/mysql/binlog/binlog_json.go
@@ -17,6 +17,7 @@ limitations under the License.
package binlog
import (
+ "bytes"
"encoding/binary"
"fmt"
"math"
@@ -25,9 +26,12 @@ import (
"vitess.io/vitess/go/hack"
"vitess.io/vitess/go/mysql/format"
"vitess.io/vitess/go/mysql/json"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
+
querypb "vitess.io/vitess/go/vt/proto/query"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/vterrors"
)
/*
@@ -44,6 +48,14 @@ https://github.com/shyiko/mysql-binlog-connector-java/pull/119/files
https://github.com/noplay/python-mysql-replication/blob/175df28cc8b536a68522ff9b09dc5440adad6094/pymysqlreplication/packet.py
*/
+type jsonDiffOp uint8
+
+const (
+ jsonDiffOpReplace = jsonDiffOp(iota)
+ jsonDiffOpInsert
+ jsonDiffOpRemove
+)
+
// ParseBinaryJSON provides the parsing function from the mysql binary json
// representation to a JSON value instance.
func ParseBinaryJSON(data []byte) (*json.Value, error) {
@@ -60,6 +72,168 @@ func ParseBinaryJSON(data []byte) (*json.Value, error) {
return node, nil
}
+// ParseBinaryJSONDiff provides the parsing function from the binary MySQL
+// JSON diff representation to an SQL expression. These diffs are included
+// in the AFTER image of PartialUpdateRows events, which exist in MySQL 8.0
+// and later when --binlog-row-value-options=PARTIAL_JSON is set. You can
+// read more about these here:
+// https://dev.mysql.com/blog-archive/efficient-json-replication-in-mysql-8-0/
+// https://dev.mysql.com/worklog/task/?id=2955
+// https://github.com/mysql/mysql-server/blob/trunk/sql-common/json_diff.h
+// https://github.com/mysql/mysql-server/blob/trunk/sql-common/json_diff.cc
+//
+// The binary format for the partial JSON column or JSON diff is:
+// +--------+--------+--------+ +--------+
+// | length | diff_1 | diff_2 | ... | diff_N |
+// +--------+--------+--------+ +--------+
+//
+// Each diff_i represents a single JSON diff. It has the following
+// format:
+// +-----------+-------------+------+ +-------------+------+
+// | operation | path_length | path | ( | data_length | data | )?
+// +-----------+-------------+------+ +-------------+------+
+//
+// The fields are:
+//
+// 1. operation: a single byte containing the JSON diff operation.
+// The possible values are defined by enum_json_diff_operation:
+// REPLACE=0
+// INSERT=1
+// REMOVE=2
+//
+// 2. path_length: an unsigned integer in net_field_length() format.
+//
+// 3. path: a string of 'path_length' bytes containing the JSON path
+// of the update.
+//
+// 4. data_length: an unsigned integer in net_field_length() format.
+//
+// 5. data: a string of 'data_length' bytes containing the JSON
+// document that will be inserted at the position specified by
+// 'path'.
+//
+// data_length and data are omitted if and only if operation=REMOVE.
+//
+// Examples of the resulting SQL expression are:
+// - "" for an empty diff when the column was not updated
+// - "null" for a JSON null
+// - "JSON_REMOVE(%s, _utf8mb4'$.salary')" for a REMOVE operation
+// - "JSON_INSERT(%s, _utf8mb4'$.role', CAST(JSON_QUOTE(_utf8mb4'manager') as JSON))" for an INSERT operation
+// - "JSON_INSERT(JSON_REMOVE(JSON_REPLACE(%s, _utf8mb4'$.day', CAST(JSON_QUOTE(_utf8mb4'tuesday') as JSON)), _utf8mb4'$.favorite_color'), _utf8mb4'$.hobby', CAST(JSON_QUOTE(_utf8mb4'skiing') as JSON))" for a more complex example
+func ParseBinaryJSONDiff(data []byte) (sqltypes.Value, error) {
+ if len(data) == 0 {
+ // An empty diff is used as a way to elide the column from
+ // the AFTER image when it was not updated in the row event.
+ return sqltypes.MakeTrusted(sqltypes.Expression, data), nil
+ }
+
+ diff := bytes.Buffer{}
+ // Reasonable estimate of the space we'll need to build the SQL
+ // expression in order to try to avoid reallocations without
+ // overallocating too much.
+ diff.Grow(len(data) + 80)
+ pos := 0
+ outer := false
+ innerStr := ""
+
+ // Create the SQL expression from the data which will consist of
+ // a sequence of JSON_X(col/json, path[, value]) clauses where X
+ // is REPLACE, INSERT, or REMOVE. The data can also be a JSON
+ // null, which is a special case we handle here as well. We take
+ // a binary representation of a vector of JSON diffs, for example:
+ // (REPLACE, '$.a', '7')
+ // (REMOVE, '$.d[0]')
+ // (INSERT, '$.e', '"ee"')
+ // (INSERT, '$.f[1]', '"ff"')
+ // (INSERT, '$.g', '"gg"')
+ // And build an SQL expression from it:
+ // JSON_INSERT(
+ // JSON_INSERT(
+ // JSON_INSERT(
+ // JSON_REMOVE(
+ // JSON_REPLACE(
+ // col, '$.a', 7),
+ // '$.d[0]'),
+ // '$.e', 'ee'),
+ // '$.f[1]', 'ff'),
+ // '$.g', 'gg')
+ for pos < len(data) {
+ opType := jsonDiffOp(data[pos])
+ pos++
+ if outer {
+ // We process the bytes sequentially but build the SQL
+ // expression from the innermost function to the outermost
+ // and thus need to wrap any subsequent functions around the
+ // previous one(s). For example:
+ // - inner: JSON_REPLACE(%s, '$.a', 7)
+ // - outer: JSON_REMOVE(<inner>, '$.b')
+ innerStr = diff.String()
+ diff.Reset()
+ }
+ switch opType {
+ case jsonDiffOpReplace:
+ diff.WriteString("JSON_REPLACE(")
+ case jsonDiffOpInsert:
+ diff.WriteString("JSON_INSERT(")
+ case jsonDiffOpRemove:
+ diff.WriteString("JSON_REMOVE(")
+ default:
+ // Can be a JSON null.
+ js, err := ParseBinaryJSON(data)
+ if err == nil && js.Type() == json.TypeNull {
+ return sqltypes.MakeTrusted(sqltypes.Expression, js.MarshalSQLTo(nil)), nil
+ }
+ return sqltypes.Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT,
+ "invalid JSON diff operation: %d", opType)
+ }
+ if outer {
+ // Wrap this outer function around the previous inner one(s).
+ diff.WriteString(innerStr)
+ diff.WriteString(", ")
+ } else { // Only the inner most function has the field name
+ diff.WriteString("%s, ") // This will later be replaced by the field name
+ }
+ outer = true
+
+ // Read the JSON document path that we want to operate on.
+ pathLen, readTo := readVariableLength(data, pos)
+ pos = readTo
+ path := data[pos : pos+pathLen]
+ pos += pathLen
+ // We have to specify the unicode character set for the path we
+ // use in the expression as the connection can be using a different
+ // character set (e.g. vreplication always uses set names binary).
+ // The generated path will look like this: _utf8mb4'$.role'
+ diff.WriteString(sqlparser.Utf8mb4Str)
+ diff.WriteByte('\'')
+ diff.Write(path)
+ diff.WriteByte('\'')
+ if opType == jsonDiffOpRemove { // No value for remove
+ diff.WriteByte(')') // Close the JSON function
+ continue
+ }
+
+ diff.WriteString(", ")
+ // Read the value that we want to set.
+ valueLen, readTo := readVariableLength(data, pos)
+ pos = readTo
+ // Parse the native JSON type and its value that we want to set
+ // (string, number, object, array, null).
+ value, err := ParseBinaryJSON(data[pos : pos+valueLen])
+ if err != nil {
+ return sqltypes.Value{}, vterrors.Wrapf(err,
+ "cannot read JSON diff value for path %q", path)
+ }
+ pos += valueLen
+ // Generate the SQL clause for the JSON value. For example:
+ // CAST(JSON_QUOTE(_utf8mb4'manager') as JSON)
+ diff.Write(value.MarshalSQLTo(nil))
+ diff.WriteByte(')') // Close the JSON function
+ }
+
+ return sqltypes.MakeTrusted(sqltypes.Expression, diff.Bytes()), nil
+}
+
// jsonDataType has the values used in the mysql json binary representation to denote types.
// We have string, literal(true/false/null), number, object or array types.
// large object => doc size > 64K: you get pointers instead of inline values.
@@ -315,7 +489,7 @@ func binparserOpaque(_ jsonDataType, data []byte, pos int) (node *json.Value, er
precision := decimalData[0]
scale := decimalData[1]
metadata := (uint16(precision) << 8) + uint16(scale)
- val, _, err := CellValue(decimalData, 2, TypeNewDecimal, metadata, &querypb.Field{Type: querypb.Type_DECIMAL})
+ val, _, err := CellValue(decimalData, 2, TypeNewDecimal, metadata, &querypb.Field{Type: querypb.Type_DECIMAL}, false)
if err != nil {
return nil, err
}
diff --git a/go/mysql/binlog/rbr.go b/go/mysql/binlog/rbr.go
index 8b95b0daee9..7512413f606 100644
--- a/go/mysql/binlog/rbr.go
+++ b/go/mysql/binlog/rbr.go
@@ -26,9 +26,10 @@ import (
"time"
"vitess.io/vitess/go/sqltypes"
- querypb "vitess.io/vitess/go/vt/proto/query"
- "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
// ZeroTimestamp is the special value 0 for a timestamp.
@@ -130,7 +131,7 @@ func CellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) {
uint32(data[pos+2])<<16|
uint32(data[pos+3])<<24), nil
default:
- return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported blob/geometry metadata value %v (data: %v pos: %v)", metadata, data, pos)
+ return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported blob/geometry metadata value %v (data: %v pos: %v)", metadata, data, pos)
}
case TypeString:
// This may do String, Enum, and Set. The type is in
@@ -151,7 +152,7 @@ func CellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) {
return l + 1, nil
default:
- return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported type %v (data: %v pos: %v)", typ, data, pos)
+ return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported type %v (data: %v pos: %v)", typ, data, pos)
}
}
@@ -176,7 +177,7 @@ func printTimestamp(v uint32) *bytes.Buffer {
// byte to determine general shared aspects of types and the querypb.Field to
// determine other info specifically about its underlying column (SQL column
// type, column length, charset, etc)
-func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.Field) (sqltypes.Value, int, error) {
+func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.Field, partialJSON bool) (sqltypes.Value, int, error) {
switch typ {
case TypeTiny:
if sqltypes.IsSigned(field.Type) {
@@ -644,7 +645,7 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.F
return sqltypes.MakeTrusted(querypb.Type_ENUM,
strconv.AppendUint(nil, uint64(val), 10)), 2, nil
default:
- return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff)
+ return sqltypes.NULL, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff)
}
case TypeSet:
@@ -672,14 +673,20 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.F
uint32(data[pos+2])<<16 |
uint32(data[pos+3])<<24)
default:
- return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos)
+ return sqltypes.NULL, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos)
}
pos += int(metadata)
// For JSON, we parse the data, and emit SQL.
if typ == TypeJSON {
- var err error
jsonData := data[pos : pos+l]
+ if partialJSON {
+ val, err := ParseBinaryJSONDiff(jsonData)
+ if err != nil {
+ panic(err)
+ }
+ return val, l + int(metadata), nil
+ }
jsonVal, err := ParseBinaryJSON(jsonData)
if err != nil {
panic(err)
@@ -710,7 +717,7 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.F
return sqltypes.MakeTrusted(querypb.Type_UINT16,
strconv.AppendUint(nil, uint64(val), 10)), 2, nil
default:
- return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff)
+ return sqltypes.NULL, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff)
}
}
if t == TypeSet {
@@ -776,13 +783,13 @@ func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.F
uint32(data[pos+2])<<16 |
uint32(data[pos+3])<<24)
default:
- return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported geometry metadata value %v (data: %v pos: %v)", metadata, data, pos)
+ return sqltypes.NULL, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported geometry metadata value %v (data: %v pos: %v)", metadata, data, pos)
}
pos += int(metadata)
return sqltypes.MakeTrusted(querypb.Type_GEOMETRY,
data[pos:pos+l]), l + int(metadata), nil
default:
- return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported type %v", typ)
+ return sqltypes.NULL, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported type %v", typ)
}
}
diff --git a/go/mysql/binlog/rbr_test.go b/go/mysql/binlog/rbr_test.go
index 1dfaf90a33e..ce49e587e6c 100644
--- a/go/mysql/binlog/rbr_test.go
+++ b/go/mysql/binlog/rbr_test.go
@@ -550,7 +550,7 @@ func TestCellLengthAndData(t *testing.T) {
}
// Test CellValue.
- out, l, err := CellValue(padded, 1, tcase.typ, tcase.metadata, &querypb.Field{Type: tcase.styp})
+ out, l, err := CellValue(padded, 1, tcase.typ, tcase.metadata, &querypb.Field{Type: tcase.styp}, false)
if err != nil || l != len(tcase.data) || out.Type() != tcase.out.Type() || !bytes.Equal(out.Raw(), tcase.out.Raw()) {
t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v %v %v, was expecting %v %v \nwant: %s\ngot: %s",
tcase.typ, tcase.data, out, l, err, tcase.out, len(tcase.data), tcase.out.Raw(), out.Raw())
diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go
index 5d472230d0e..72c228b594e 100644
--- a/go/mysql/binlog_event.go
+++ b/go/mysql/binlog_event.go
@@ -86,9 +86,20 @@ type BinlogEvent interface {
IsWriteRows() bool
// IsUpdateRows returns true if this is a UPDATE_ROWS_EVENT.
IsUpdateRows() bool
+ // IsPartialUpdateRows returns true if a partial JSON update event
+ // is found. These events are only seen in MySQL 8.0 if the mysqld
+ // instance has binlog_row_value_options=PARTIAL_JSON set.
+ IsPartialUpdateRows() bool
// IsDeleteRows returns true if this is a DELETE_ROWS_EVENT.
IsDeleteRows() bool
+ // IsPseudo is for custom implementations of GTID.
+ IsPseudo() bool
+
+ // IsTransactionPayload returns true if a compressed transaction
+ // payload event is found (binlog_transaction_compression=ON).
+ IsTransactionPayload() bool
+
// Timestamp returns the timestamp from the event header.
Timestamp() uint32
// ServerID returns the server ID from the event header.
@@ -123,8 +134,8 @@ type BinlogEvent interface {
TableMap(BinlogFormat) (*TableMap, error)
// Rows returns a Rows struct representing data from a
// {WRITE,UPDATE,DELETE}_ROWS_EVENT. This is only valid if
- // IsWriteRows(), IsUpdateRows(), or IsDeleteRows() returns
- // true.
+ // IsWriteRows(), IsUpdateRows(), IsPartialUpdateRows(), or
+ // IsDeleteRows() returns true.
Rows(BinlogFormat, *TableMap) (Rows, error)
// TransactionPayload returns a TransactionPayload type which provides
// a GetNextEvent() method to iterate over the events contained within
@@ -141,13 +152,6 @@ type BinlogEvent interface {
// the same event and a nil checksum.
StripChecksum(BinlogFormat) (ev BinlogEvent, checksum []byte, err error)
- // IsPseudo is for custom implementations of GTID.
- IsPseudo() bool
-
- // IsTransactionPayload returns true if a compressed transaction
- // payload event is found (binlog_transaction_compression=ON).
- IsTransactionPayload() bool
-
// Bytes returns the binary representation of the event
Bytes() []byte
}
@@ -266,6 +270,12 @@ type Row struct {
// It is only set for UPDATE and DELETE events.
Identify []byte
+ // If this row was from a PartialUpdateRows event and it contains
+ // 1 or more JSON columns with partial values, then this will be
+ // set as a bitmap of which JSON columns in the AFTER image have
+ // partial values.
+ JSONPartialValues Bitmap
+
// Data is the raw data.
// It is only set for WRITE and UPDATE events.
Data []byte
diff --git a/go/mysql/binlog_event_common.go b/go/mysql/binlog_event_common.go
index c95873614f0..548875c44f7 100644
--- a/go/mysql/binlog_event_common.go
+++ b/go/mysql/binlog_event_common.go
@@ -187,6 +187,11 @@ func (ev binlogEvent) IsUpdateRows() bool {
ev.Type() == eUpdateRowsEventV2
}
+// IsPartialUpdateRows implements BinlogEvent.IsPartialUpdateRows().
+func (ev binlogEvent) IsPartialUpdateRows() bool {
+ return ev.Type() == ePartialUpdateRowsEvent
+}
+
// IsDeleteRows implements BinlogEvent.IsDeleteRows().
// We do not support v0.
func (ev binlogEvent) IsDeleteRows() bool {
diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go
index c71c8346964..b7e6ed9e0f2 100644
--- a/go/mysql/binlog_event_filepos.go
+++ b/go/mysql/binlog_event_filepos.go
@@ -203,6 +203,10 @@ func (ev filePosFakeEvent) IsUpdateRows() bool {
return false
}
+func (ev filePosFakeEvent) IsPartialUpdateRows() bool {
+ return false
+}
+
func (ev filePosFakeEvent) IsDeleteRows() bool {
return false
}
diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go
index 5844779de63..ede2abece99 100644
--- a/go/mysql/binlog_event_mysql56_test.go
+++ b/go/mysql/binlog_event_mysql56_test.go
@@ -27,6 +27,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/replication"
)
@@ -248,3 +249,480 @@ func TestMysql56SemiSyncAck(t *testing.T) {
assert.True(t, e.IsQuery())
}
}
+
+func TestMySQL56PartialUpdateRowsEvent(t *testing.T) {
+ format := BinlogFormat{
+ HeaderSizes: []byte{
+ 0, 13, 0, 8, 0, 0, 0, 0, 4, 0, 4, 0, 0, 0, 98, 0, 4, 26, 8, 0, 0, 0, 8, 8, 8, 2, 0, 0, 0, 10, 10, 10, 42, 42, 0, 18, 52, 0, 10, 40, 0,
+ },
+ ServerVersion: "8.0.40",
+ FormatVersion: 4,
+ HeaderLength: 19,
+ ChecksumAlgorithm: 1,
+ }
+ // This is from the following table structure:
+ // CREATE TABLE `customer` (
+ // `customer_id` bigint NOT NULL AUTO_INCREMENT,
+ // `email` varbinary(128) DEFAULT NULL,
+ // `jd` json DEFAULT NULL,
+ // PRIMARY KEY (`customer_id`)
+ // )
+ tm := &TableMap{
+ Flags: 1,
+ Database: "vt_commerce",
+ Name: "customer",
+ Types: []byte{8, 15, 245},
+ CanBeNull: Bitmap{
+ data: []byte{6},
+ count: 3,
+ },
+ Metadata: []uint16{0, 128, 4},
+ ColumnCollationIDs: []collations.ID{63},
+ }
+
+ testCases := []struct {
+ name string
+ rawEvent []byte
+ numRows int
+ want string
+ }{
+ {
+ name: "INSERT",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='alice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"salary": 100}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='alice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(@3, '$.role', 'manager') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='bob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"salary": 99}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='bob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(@3, '$.role', 'manager') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='charlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"salary": 99}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='charlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(@3, '$.role', 'manager') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"salary": 99}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(@3, '$.role', 'manager') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='eve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"salary": 100}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='eve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(@3, '$.role', 'manager') /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 196, 19, 87, 103, 39, 47, 142, 143, 12, 6, 2, 0, 0, 229, 104, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 1, 0, 0, 0, 0, 0,
+ 0, 0, 16, 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 0, 1, 0, 17, 0, 11, 0, 6, 0, 5, 100, 0, 115,
+ 97, 108, 97, 114, 121, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 16, 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0,
+ 0, 0, 1, 6, 36, 46, 114, 111, 108, 101, 9, 12, 7, 109, 97, 110, 97, 103, 101, 114, 0, 2, 0, 0, 0, 0, 0, 0, 0, 14, 98, 111, 98, 64, 100, 111,
+ 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 0, 1, 0, 17, 0, 11, 0, 6, 0, 5, 99, 0, 115, 97, 108, 97, 114, 121, 1, 1, 0, 2, 0, 0, 0, 0,
+ 0, 0, 0, 14, 98, 111, 98, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 1, 6, 36, 46, 114, 111, 108, 101, 9, 12, 7, 109, 97,
+ 110, 97, 103, 101, 114, 0, 3, 0, 0, 0, 0, 0, 0, 0, 18, 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18,
+ 0, 0, 0, 0, 1, 0, 17, 0, 11, 0, 6, 0, 5, 99, 0, 115, 97, 108, 97, 114, 121, 1, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 18, 99, 104, 97, 114, 108, 105,
+ 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 1, 6, 36, 46, 114, 111, 108, 101, 9, 12, 7, 109, 97, 110, 97, 103, 101,
+ 114, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14, 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 0, 1, 0, 17, 0, 11, 0, 6, 0,
+ 5, 99, 0, 115, 97, 108, 97, 114, 121, 1, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14, 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18,
+ 0, 0, 0, 1, 6, 36, 46, 114, 111, 108, 101, 9, 12, 7, 109, 97, 110, 97, 103, 101, 114, 0, 5, 0, 0, 0, 0, 0, 0, 0, 14, 101, 118, 101, 64, 100,
+ 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 0, 1, 0, 17, 0, 11, 0, 6, 0, 5, 100, 0, 115, 97, 108, 97, 114, 121, 1, 1, 0, 5, 0, 0, 0,
+ 0, 0, 0, 0, 14, 101, 118, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 18, 0, 0, 0, 1, 6, 36, 46, 114, 111, 108, 101, 9, 12, 7, 109,
+ 97, 110, 97, 103, 101, 114,
+ },
+ numRows: 5,
+ want: "JSON_INSERT(%s, _utf8mb4'$.role', CAST(JSON_QUOTE(_utf8mb4'manager') as JSON))",
+ },
+ {
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='alice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"role": "manager", "salary": 100}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='alice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REPLACE(@3, '$.role', 'IC') /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 155, 21, 87, 103, 39, 47, 142, 143, 12, 148, 0, 0, 0, 135, 106, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 1, 0, 0, 0, 0, 0, 0,
+ 0, 16, 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 37, 0, 0, 0, 0, 2, 0, 36, 0, 18, 0, 4, 0, 22, 0, 6, 0, 12, 28,
+ 0, 5, 100, 0, 114, 111, 108, 101, 115, 97, 108, 97, 114, 121, 7, 109, 97, 110, 97, 103, 101, 114, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 16, 97, 108,
+ 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 13, 0, 0, 0, 0, 6, 36, 46, 114, 111, 108, 101, 4, 12, 2, 73, 67,
+ },
+ name: "REPLACE",
+ numRows: 1,
+ want: "JSON_REPLACE(%s, _utf8mb4'$.role', CAST(JSON_QUOTE(_utf8mb4'IC') as JSON))",
+ },
+ {
+ name: "REMOVE",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='bob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"role": "manager", "salary": 99}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='bob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REMOVE(@3, '$.salary') /* JSON meta=4 nullable=1 is_null=0 */
+ numRows: 1,
+ rawEvent: []byte{
+ 176, 22, 87, 103, 39, 47, 142, 143, 12, 141, 0, 0, 0, 34, 108, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 2, 0, 0, 0, 0, 0, 0, 0,
+ 14, 98, 111, 98, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 37, 0, 0, 0, 0, 2, 0, 36, 0, 18, 0, 4, 0, 22, 0, 6, 0, 12, 28, 0, 5, 99, 0, 114,
+ 111, 108, 101, 115, 97, 108, 97, 114, 121, 7, 109, 97, 110, 97, 103, 101, 114, 1, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 14, 98, 111, 98, 64, 100, 111, 109,
+ 97, 105, 110, 46, 99, 111, 109, 10, 0, 0, 0, 2, 8, 36, 46, 115, 97, 108, 97, 114, 121,
+ },
+ want: "JSON_REMOVE(%s, _utf8mb4'$.salary')",
+ },
+ {
+ name: "REMOVE and REPLACE",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='alice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 100, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='alice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REMOVE(
+ // ### JSON_REPLACE(@3, '$.day', 'monday'),
+ // ### '$.favorite_color') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='bob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 99, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='bob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REMOVE(
+ // ### JSON_REPLACE(@3, '$.day', 'monday'),
+ // ### '$.favorite_color') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='charlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 99, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='charlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REMOVE(
+ // ### JSON_REPLACE(@3, '$.day', 'monday'),
+ // ### '$.favorite_color') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 99, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REMOVE(
+ // ### JSON_REPLACE(@3, '$.day', 'monday'),
+ // ### '$.favorite_color') /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='eve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 100, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='eve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REMOVE(
+ // ### JSON_REPLACE(@3, '$.day', 'monday'),
+ // ### '$.favorite_color') /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 227, 240, 86, 103, 39, 74, 58, 208, 33, 225, 3, 0, 0, 173, 122, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 16, 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4,
+ 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 100, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111,
+ 108, 111, 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121,
+ 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 16, 97, 108, 105, 99, 101,
+ 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 34, 0, 0, 0, 0, 5, 36, 46, 100, 97, 121, 8, 12, 6, 109, 111, 110, 100, 97, 121, 2, 16, 36,
+ 46, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 0, 2, 0, 0, 0, 0, 0, 0, 0, 14, 98, 111, 98, 64, 100, 111, 109, 97,
+ 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78,
+ 0, 12, 86, 0, 5, 99, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111,
+ 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98,
+ 108, 97, 99, 107, 1, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 14, 98, 111, 98, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 34, 0, 0, 0, 0, 5, 36,
+ 46, 100, 97, 121, 8, 12, 6, 109, 111, 110, 100, 97, 121, 2, 16, 36, 46, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0, 18, 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0,
+ 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 99, 0, 12, 90, 0, 100, 97, 121, 114,
+ 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102,
+ 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 18,
+ 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 34, 0, 0, 0, 0, 5, 36, 46, 100, 97, 121, 8, 12, 6, 109,
+ 111, 110, 100, 97, 121, 2, 16, 36, 46, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14, 100,
+ 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0,
+ 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 99, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108,
+ 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103,
+ 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14, 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46,
+ 99, 111, 109, 34, 0, 0, 0, 0, 5, 36, 46, 100, 97, 121, 8, 12, 6, 109, 111, 110, 100, 97, 121, 2, 16, 36, 46, 102, 97, 118, 111, 114, 105, 116,
+ 101, 95, 99, 111, 108, 111, 114, 0, 5, 0, 0, 0, 0, 0, 0, 0, 14, 101, 118, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0,
+ 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 100, 0, 12, 90, 0, 100,
+ 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111,
+ 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0, 5, 0, 0, 0, 0, 0,
+ 0, 0, 14, 101, 118, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 34, 0, 0, 0, 0, 5, 36, 46, 100, 97, 121, 8, 12, 6, 109, 111, 110,
+ 100, 97, 121, 2, 16, 36, 46, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114,
+ },
+ numRows: 5,
+ want: "JSON_REMOVE(JSON_REPLACE(%s, _utf8mb4'$.day', CAST(JSON_QUOTE(_utf8mb4'monday') as JSON)), _utf8mb4'$.favorite_color')",
+ },
+ {
+ name: "INSERT and REMOVE and REPLACE",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='charlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "monday", "role": "manager", "salary": 99, "favorite_color": "red"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='charlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(
+ // ### JSON_REMOVE(
+ // ### JSON_REPLACE(@3, '$.day', 'tuesday'),
+ // ### '$.favorite_color'),
+ // ### '$.hobby', 'skiing') /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 48, 25, 87, 103, 39, 47, 142, 143, 12, 234, 0, 0, 0, 0, 117, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 3, 0, 0, 0, 0, 0, 0, 0,
+ 18, 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 79, 0, 0, 0, 0, 4, 0, 78, 0, 32, 0, 3, 0, 35, 0, 4, 0,
+ 39, 0, 6, 0, 45, 0, 14, 0, 12, 59, 0, 12, 66, 0, 5, 99, 0, 12, 74, 0, 100, 97, 121, 114, 111, 108, 101, 115, 97, 108, 97, 114, 121, 102, 97,
+ 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 109, 111, 110, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100,
+ 1, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 18, 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 53, 0, 0, 0, 0, 5, 36,
+ 46, 100, 97, 121, 9, 12, 7, 116, 117, 101, 115, 100, 97, 121, 2, 16, 36, 46, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114,
+ 1, 7, 36, 46, 104, 111, 98, 98, 121, 8, 12, 6, 115, 107, 105, 105, 110, 103,
+ },
+ numRows: 1,
+ want: "JSON_INSERT(JSON_REMOVE(JSON_REPLACE(%s, _utf8mb4'$.day', CAST(JSON_QUOTE(_utf8mb4'tuesday') as JSON)), _utf8mb4'$.favorite_color'), _utf8mb4'$.hobby', CAST(JSON_QUOTE(_utf8mb4'skiing') as JSON))",
+ },
+ {
+ name: "REPLACE with null",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"role": "manager", "salary": 99}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REPLACE(@3, '$.salary', null) /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 148, 26, 87, 103, 39, 47, 142, 143, 12, 144, 0, 0, 0, 158, 118, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 4, 0, 0, 0, 0, 0, 0, 0,
+ 14, 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 37, 0, 0, 0, 0, 2, 0, 36, 0, 18, 0, 4, 0, 22, 0, 6, 0, 12, 28, 0, 5, 99, 0,
+ 114, 111, 108, 101, 115, 97, 108, 97, 114, 121, 7, 109, 97, 110, 97, 103, 101, 114, 1, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14, 100, 97, 110, 64, 100, 111,
+ 109, 97, 105, 110, 46, 99, 111, 109, 13, 0, 0, 0, 0, 8, 36, 46, 115, 97, 108, 97, 114, 121, 2, 4, 0,
+ },
+ numRows: 1,
+ want: "JSON_REPLACE(%s, _utf8mb4'$.salary', CAST(_utf8mb4'null' as JSON))",
+ },
+ {
+ name: "REPLACE 2 paths",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"role": "manager", "salary": null}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='dan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_REPLACE(@3, '$.salary', 110,
+ // ### '$.role', 'IC') /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 32, 32, 87, 103, 39, 26, 45, 78, 117, 158, 0, 0, 0, 145, 106, 0, 0, 0, 0, 176, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14,
+ 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 37, 0, 0, 0, 0, 2, 0, 36, 0, 18, 0, 4, 0, 22, 0, 6, 0, 12, 28, 0, 5, 99, 0, 114, 111,
+ 108, 101, 115, 97, 108, 97, 114, 121, 7, 109, 97, 110, 97, 103, 101, 114, 1, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 14, 100, 97, 110, 64, 100, 111, 109, 97,
+ 105, 110, 46, 99, 111, 109, 27, 0, 0, 0, 0, 8, 36, 46, 115, 97, 108, 97, 114, 121, 3, 5, 110, 0, 0, 6, 36, 46, 114, 111, 108, 101, 4, 12, 2, 73, 67,
+ },
+ numRows: 1,
+ want: "JSON_REPLACE(JSON_REPLACE(%s, _utf8mb4'$.salary', CAST(110 as JSON)), _utf8mb4'$.role', CAST(JSON_QUOTE(_utf8mb4'IC') as JSON))",
+ },
+ {
+ name: "JSON null",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='neweve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 100, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='neweve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='null' /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 109, 200, 88, 103, 39, 57, 91, 186, 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, 178, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 7, 7, 0, 5, 0, 0, 0, 0, 0, 0, 0, 17, 110,
+ 101, 119, 101, 118, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51,
+ 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 100, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97,
+ 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101,
+ 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 17, 110, 101, 119, 101, 118, 101, 64, 100, 111, 109, 97, 105, 110, 46,
+ 99, 111, 109, 2, 0, 0, 0, 4, 0,
+ },
+ numRows: 1,
+ want: "null",
+ },
+ {
+ name: "null literal string",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=10 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='mlord@planetscale.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=NULL /* JSON meta=4 nullable=1 is_null=1 */
+ // ### SET
+ // ### @1=10 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='mlord@planetscale.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='null' /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 178, 168, 89, 103, 39, 37, 191, 137, 18, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 153, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 7, 7, 4, 10, 0, 0, 0, 0, 0, 0, 0, 21,
+ 109, 108, 111, 114, 100, 64, 112, 108, 97, 110, 101, 116, 115, 99, 97, 108, 101, 46, 99, 111, 109, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 21, 109, 108, 111,
+ 114, 100, 64, 112, 108, 97, 110, 101, 116, 115, 99, 97, 108, 101, 46, 99, 111, 109, 6, 0, 0, 0, 12, 4, 110, 117, 108, 108,
+ },
+ numRows: 1,
+ want: "\"null\"",
+ },
+ {
+ name: "JSON object",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newalice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "wednesday", "role": "manager", "color": "red", "salary": 100}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newalice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=JSON_INSERT(@3, '$.misc', '{"address":"1012 S Park", "town":"Hastings", "state":"MI"}') /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 208, 160, 89, 103, 39, 202, 59, 214, 68, 242, 0, 0, 0, 0, 0, 0, 0, 0, 0, 153, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 7, 7, 0, 1, 0, 0, 0, 0, 0, 0, 0, 19, 110,
+ 101, 119, 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 73, 0, 0, 0, 0, 4, 0, 72, 0, 32, 0, 3, 0, 35, 0, 4, 0, 39, 0, 5,
+ 0, 44, 0, 6, 0, 12, 50, 0, 12, 60, 0, 12, 68, 0, 5, 100, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 9,
+ 119, 101, 100, 110, 101, 115, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 19, 110, 101, 119,
+ 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 69, 0, 0, 0, 1, 6, 36, 46, 109, 105, 115, 99, 60, 12, 58, 123, 34, 97, 100,
+ 100, 114, 101, 115, 115, 34, 58, 34, 49, 48, 49, 50, 32, 83, 32, 80, 97, 114, 107, 34, 44, 32, 34, 116, 111, 119, 110, 34, 58, 34, 72, 97, 115, 116,
+ 105, 110, 103, 115, 34, 44, 32, 34, 115, 116, 97, 116, 101, 34, 58, 34, 77, 73, 34, 125,
+ },
+ numRows: 1,
+ want: "JSON_INSERT(%s, _utf8mb4'$.misc', CAST(JSON_QUOTE(_utf8mb4'{\"address\":\"1012 S Park\", \"town\":\"Hastings\", \"state\":\"MI\"}') as JSON))",
+ },
+ {
+ name: "JSON field not updated",
+ // The mysqlbinlog -vvv --base64-output=decode-rows output for the following event:
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=1 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newalice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 100, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=101 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newalice@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=@3 /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=2 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newbob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 99, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=102 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newbob@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=@3 /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=3 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newcharlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "monday", "role": "manager", "color": "red", "hobby": "skiing", "salary": 99}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=103 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newcharlie@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=@3 /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=4 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newdan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 99, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=104 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='newdan@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=@3 /* JSON meta=4 nullable=1 is_null=0 */
+ // ### UPDATE `vt_commerce`.`customer`
+ // ### WHERE
+ // ### @1=5 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='neweve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3='{"day": "friday", "role": "manager", "color": "red", "salary": 100, "favorite_color": "black"}' /* JSON meta=4 nullable=1 is_null=0 */
+ // ### SET
+ // ### @1=105 /* LONGINT meta=0 nullable=0 is_null=0 */
+ // ### @2='neweve@domain.com' /* VARSTRING(128) meta=128 nullable=1 is_null=0 */
+ // ### @3=@3 /* JSON meta=4 nullable=1 is_null=0 */
+ rawEvent: []byte{
+ 194, 74, 100, 103, 39, 46, 144, 133, 54, 77, 3, 0, 0, 77, 128, 0, 0, 0, 0, 153, 0, 0, 0, 0, 0, 1, 0, 2, 0, 3, 255, 255, 0, 1, 0, 0, 0, 0, 0, 0, 0,
+ 19, 110, 101, 119, 97, 108, 105, 99, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0,
+ 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 100, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111,
+ 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97,
+ 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0, 101, 0, 0, 0, 0, 0, 0, 0, 19, 110, 101, 119, 97, 108, 105, 99, 101, 64,
+ 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 17, 110, 101, 119, 98, 111, 98, 64, 100, 111, 109, 97, 105,
+ 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86,
+ 0, 5, 99, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101,
+ 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0,
+ 102, 0, 0, 0, 0, 0, 0, 0, 17, 110, 101, 119, 98, 111, 98, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
+ 21, 110, 101, 119, 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 89, 0, 0, 0, 0, 5, 0, 88, 0, 39, 0, 3, 0, 42,
+ 0, 4, 0, 46, 0, 5, 0, 51, 0, 5, 0, 56, 0, 6, 0, 12, 62, 0, 12, 69, 0, 12, 77, 0, 12, 81, 0, 5, 99, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108,
+ 111, 114, 104, 111, 98, 98, 121, 115, 97, 108, 97, 114, 121, 6, 109, 111, 110, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 6,
+ 115, 107, 105, 105, 110, 103, 1, 1, 0, 103, 0, 0, 0, 0, 0, 0, 0, 21, 110, 101, 119, 99, 104, 97, 114, 108, 105, 101, 64, 100, 111, 109, 97, 105, 110,
+ 46, 99, 111, 109, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 17, 110, 101, 119, 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0,
+ 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14, 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 99, 0, 12, 90, 0, 100,
+ 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 102, 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114,
+ 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100, 5, 98, 108, 97, 99, 107, 1, 1, 0, 104, 0, 0, 0, 0, 0, 0, 0, 17,
+ 110, 101, 119, 100, 97, 110, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 17, 110, 101, 119, 101, 118,
+ 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 97, 0, 0, 0, 0, 5, 0, 96, 0, 39, 0, 3, 0, 42, 0, 4, 0, 46, 0, 5, 0, 51, 0, 6, 0, 57, 0, 14,
+ 0, 12, 71, 0, 12, 78, 0, 12, 86, 0, 5, 100, 0, 12, 90, 0, 100, 97, 121, 114, 111, 108, 101, 99, 111, 108, 111, 114, 115, 97, 108, 97, 114, 121, 102,
+ 97, 118, 111, 114, 105, 116, 101, 95, 99, 111, 108, 111, 114, 6, 102, 114, 105, 100, 97, 121, 7, 109, 97, 110, 97, 103, 101, 114, 3, 114, 101, 100,
+ 5, 98, 108, 97, 99, 107, 1, 1, 0, 105, 0, 0, 0, 0, 0, 0, 0, 17, 110, 101, 119, 101, 118, 101, 64, 100, 111, 109, 97, 105, 110, 46, 99, 111, 109, 0,
+ 0, 0, 0,
+ },
+ numRows: 5,
+ want: "",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mysql56PartialUpdateRowEvent := NewMysql56BinlogEvent(tc.rawEvent)
+ require.True(t, mysql56PartialUpdateRowEvent.IsPartialUpdateRows())
+
+ ev, err := mysql56PartialUpdateRowEvent.Rows(format, tm)
+ require.NoError(t, err)
+
+ assert.Len(t, ev.Rows, tc.numRows)
+
+ for i := range ev.Rows {
+ vals, err := ev.StringValuesForTests(tm, i)
+ require.NoError(t, err)
+ // The third column is the JSON column.
+ require.Equal(t, tc.want, vals[2])
+ t.Logf("Rows: %v", vals)
+ }
+ })
+ }
+}
diff --git a/go/mysql/binlog_event_rbr.go b/go/mysql/binlog_event_rbr.go
index d77b7bcb9a0..29e0211bc36 100644
--- a/go/mysql/binlog_event_rbr.go
+++ b/go/mysql/binlog_event_rbr.go
@@ -283,10 +283,10 @@ func readColumnCollationIDs(data []byte, pos, count int) ([]collations.ID, error
func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
typ := ev.Type()
data := ev.Bytes()[f.HeaderLength:]
- hasIdentify := typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 ||
+ hasIdentify := typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2 || typ == ePartialUpdateRowsEvent ||
typ == eDeleteRowsEventV1 || typ == eDeleteRowsEventV2
hasData := typ == eWriteRowsEventV1 || typ == eWriteRowsEventV2 ||
- typ == eUpdateRowsEventV1 || typ == eUpdateRowsEventV2
+ typ == eUpdateRowsEventV1 || typ == ePartialUpdateRowsEvent || typ == eUpdateRowsEventV2
result := Rows{}
pos := 6
@@ -297,7 +297,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
pos += 2
// version=2 have extra data here.
- if typ == eWriteRowsEventV2 || typ == eUpdateRowsEventV2 || typ == eDeleteRowsEventV2 {
+ if typ == eWriteRowsEventV2 || typ == eUpdateRowsEventV2 || typ == ePartialUpdateRowsEvent || typ == eDeleteRowsEventV2 {
// This extraDataLength contains the 2 bytes length.
extraDataLength := binary.LittleEndian.Uint16(data[pos : pos+2])
pos += int(extraDataLength)
@@ -311,6 +311,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
numIdentifyColumns := 0
numDataColumns := 0
+ numJSONColumns := 0
if hasIdentify {
// Bitmap of the columns used for identify.
@@ -324,6 +325,15 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
numDataColumns = result.DataColumns.BitCount()
}
+ // For PartialUpdateRowsEvents, we need to know how many JSON columns there are.
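+ // The per-row partial-values bitmap has one bit per JSON column, so it
+ // is sized by this count rather than by the total column count.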
+ if ev.Type() == ePartialUpdateRowsEvent {
+ for c := 0; c < int(columnCount); c++ {
+ if tm.Types[c] == binlog.TypeJSON {
+ numJSONColumns++
+ }
+ }
+ }
+
// One row at a time.
for pos < len(data) {
row := Row{}
@@ -358,6 +368,17 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) {
row.Identify = data[startPos:pos]
}
+ if ev.Type() == ePartialUpdateRowsEvent {
+ // The first byte indicates whether or not any JSON values are partial.
+ // If it's not 1 then there's nothing special to do for the row, as all
+ // columns use their full values.
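+ // This first byte is the row's value_options field; a value of 1
+ // means PARTIAL_JSON_UPDATES applies to this row's AFTER image.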
+ partialJSON := uint8(data[pos])
+ pos++
+ if partialJSON == 1 {
+ row.JSONPartialValues, pos = newBitmap(data, pos, numJSONColumns)
+ }
+ }
+
if hasData {
// Bitmap of columns that are null (amongst the ones that are present).
row.NullColumns, pos = newBitmap(data, pos, numDataColumns)
@@ -402,6 +423,7 @@ func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, erro
var result []string
valueIndex := 0
+ jsonIndex := 0
data := rs.Rows[rowIndex].Data
pos := 0
for c := 0; c < rs.DataColumns.Count(); c++ {
@@ -413,15 +435,24 @@ func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, erro
// This column is represented, but its value is NULL.
result = append(result, "NULL")
valueIndex++
+ if tm.Types[c] == binlog.TypeJSON {
+ jsonIndex++
+ }
continue
}
- // We have real data
- value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64})
+ partialJSON := false
+ if rs.Rows[rowIndex].JSONPartialValues.Count() > 0 && tm.Types[c] == binlog.TypeJSON {
+ partialJSON = rs.Rows[rowIndex].JSONPartialValues.Bit(jsonIndex)
+ jsonIndex++
+ }
+
+ // We have real data.
+ value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}, partialJSON)
if err != nil {
return nil, err
}
- result = append(result, value.ToString())
+ result = append(result, value.RawStr())
pos += l
valueIndex++
}
@@ -452,7 +483,7 @@ func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string,
}
// We have real data
- value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64})
+ value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}, false)
if err != nil {
return nil, err
}
diff --git a/go/mysql/datetime/spec.go b/go/mysql/datetime/spec.go
index ce19126ce55..fe6b1521e43 100644
--- a/go/mysql/datetime/spec.go
+++ b/go/mysql/datetime/spec.go
@@ -359,10 +359,7 @@ func (t2 fmtFullTime24) parse(t *timeparts, bytes string) (string, bool) {
type fmtWeek0 struct{}
func (fmtWeek0) format(dst []byte, t DateTime, prec uint8) []byte {
- year, week := t.Date.SundayWeek()
- if year < t.Date.Year() {
- week = 0
- }
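+ // MySQL WEEK() mode 0: the week starts on Sunday and week 1 is the
+ // first week of the year that contains a Sunday.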
+ week := t.Date.Week(0)
return appendInt(dst, week, 2)
}
@@ -374,10 +371,7 @@ func (u fmtWeek0) parse(t *timeparts, bytes string) (string, bool) {
type fmtWeek1 struct{}
func (fmtWeek1) format(dst []byte, t DateTime, prec uint8) []byte {
- year, week := t.Date.ISOWeek()
- if year < t.Date.Year() {
- week = 0
- }
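+ // MySQL WEEK() mode 1: the week starts on Monday and week 1 is the
+ // first week of the year with four or more days in this year.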
+ week := t.Date.Week(1)
return appendInt(dst, week, 2)
}
@@ -389,7 +383,7 @@ func (u fmtWeek1) parse(t *timeparts, bytes string) (string, bool) {
type fmtWeek2 struct{}
func (fmtWeek2) format(dst []byte, t DateTime, prec uint8) []byte {
- _, week := t.Date.SundayWeek()
+ week := t.Date.Week(2)
return appendInt(dst, week, 2)
}
@@ -401,7 +395,7 @@ func (v fmtWeek2) parse(t *timeparts, bytes string) (string, bool) {
type fmtWeek3 struct{}
func (fmtWeek3) format(dst []byte, t DateTime, prec uint8) []byte {
- _, week := t.Date.ISOWeek()
+ week := t.Date.Week(3)
return appendInt(dst, week, 2)
}
diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go
index d3b9a6722ea..a04f75c6b43 100644
--- a/go/mysql/endtoend/replication_test.go
+++ b/go/mysql/endtoend/replication_test.go
@@ -1064,7 +1064,7 @@ func valuesForTests(t *testing.T, rs *mysql.Rows, tm *mysql.TableMap, rowIndex i
}
// We have real data
- value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64})
+ value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}, false)
if err != nil {
return nil, err
}
diff --git a/go/mysql/replication_constants.go b/go/mysql/replication_constants.go
index 6b6e34b2333..27d0bd331ce 100644
--- a/go/mysql/replication_constants.go
+++ b/go/mysql/replication_constants.go
@@ -110,6 +110,9 @@ const (
//eViewChangeEvent = 37
//eXAPrepareLogEvent = 38
+ // PartialUpdateRowsEvent when binlog_row_value_options=PARTIAL_JSON.
+ ePartialUpdateRowsEvent = 39
+
// Transaction_payload_event when binlog_transaction_compression=ON.
eTransactionPayloadEvent = 40
diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go
index 1290156a1cd..4253fbb5860 100644
--- a/go/test/endtoend/cluster/vtgate_process.go
+++ b/go/test/endtoend/cluster/vtgate_process.go
@@ -28,6 +28,7 @@ import (
"strconv"
"strings"
"syscall"
+ "testing"
"time"
"vitess.io/vitess/go/vt/log"
@@ -57,6 +58,8 @@ type VtgateProcess struct {
Directory string
VerifyURL string
VSchemaURL string
+ ConfigFile string
+ Config VTGateConfiguration
SysVarSetEnabled bool
PlannerVersion plancontext.PlannerVersion
// Extra Args to be set before starting the vtgate process
@@ -66,6 +69,77 @@ type VtgateProcess struct {
exit chan error
}
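+
+// VTGateConfiguration holds the subset of the vtgate configuration that
+// these tests rewrite at runtime; vtgate reloads it from its config file.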
+type VTGateConfiguration struct {
+ TransactionMode string `json:"transaction_mode,omitempty"`
+}
+
+// ToJSONString will marshal this configuration as JSON
+func (config *VTGateConfiguration) ToJSONString() string {
+ b, _ := json.MarshalIndent(config, "", "\t")
+ return string(b)
+}
+
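+// RewriteConfiguration rewrites the vtgate config file using the current
+// contents of vtgate.Config.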
+func (vtgate *VtgateProcess) RewriteConfiguration() error {
+ return os.WriteFile(vtgate.ConfigFile, []byte(vtgate.Config.ToJSONString()), 0644)
+}
+
+// WaitForConfig waits for the expectedConfig to be present in the vtgate configuration.
+func (vtgate *VtgateProcess) WaitForConfig(expectedConfig string) error {
+ timeout := time.After(30 * time.Second)
+ var response string
+ for {
+ select {
+ case <-timeout:
+ return fmt.Errorf("timed out waiting for api to work. Last response - %s", response)
+ default:
+ _, response, _ = vtgate.MakeAPICall("/debug/config")
+ if strings.Contains(response, expectedConfig) {
+ return nil
+ }
+ time.Sleep(1 * time.Second)
+ }
+ }
+}
+
+// MakeAPICall makes an API call on the given endpoint of the vtgate process.
+func (vtgate *VtgateProcess) MakeAPICall(endpoint string) (status int, response string, err error) {
+ url := fmt.Sprintf("http://localhost:%d/%s", vtgate.Port, endpoint)
+ resp, err := http.Get(url)
+ if err != nil {
+ if resp != nil {
+ status = resp.StatusCode
+ }
+ return status, "", err
+ }
+ defer func() {
+ if resp != nil && resp.Body != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ respByte, _ := io.ReadAll(resp.Body)
+ return resp.StatusCode, string(respByte), err
+}
+
+// MakeAPICallRetry makes an API call, retrying until it succeeds or the
+// timeout is reached, at which point the test is failed.
+func (vtgate *VtgateProcess) MakeAPICallRetry(t *testing.T, url string) {
+ t.Helper()
+ timeout := time.After(10 * time.Second)
+ for {
+ select {
+ case <-timeout:
+ t.Fatal("timed out waiting for api to work")
+ return
+ default:
+ status, _, err := vtgate.MakeAPICall(url)
+ if err == nil && status == 200 {
+ return
+ }
+ time.Sleep(1 * time.Second)
+ }
+ }
+}
+
const defaultVtGatePlannerVersion = planbuilder.Gen4
// Setup starts Vtgate process with required arguements
@@ -74,6 +148,7 @@ func (vtgate *VtgateProcess) Setup() (err error) {
"--topo_implementation", vtgate.CommonArg.TopoImplementation,
"--topo_global_server_address", vtgate.CommonArg.TopoGlobalAddress,
"--topo_global_root", vtgate.CommonArg.TopoGlobalRoot,
+ "--config-file", vtgate.ConfigFile,
"--log_dir", vtgate.LogDir,
"--log_queries_to_file", vtgate.FileToLogQueries,
"--port", fmt.Sprintf("%d", vtgate.Port),
@@ -98,6 +173,19 @@ func (vtgate *VtgateProcess) Setup() (err error) {
break
}
}
+ configFile, err := os.Create(vtgate.ConfigFile)
+ if err != nil {
+ log.Errorf("cannot create config file for vtgate: %v", err)
+ return err
+ }
+ _, err = configFile.WriteString(vtgate.Config.ToJSONString())
+ if err != nil {
+ return err
+ }
+ err = configFile.Close()
+ if err != nil {
+ return err
+ }
if !msvflag {
version, err := mysqlctl.GetVersionString()
if err != nil {
@@ -287,6 +375,7 @@ func VtgateProcessInstance(
Name: "vtgate",
Binary: "vtgate",
FileToLogQueries: path.Join(tmpDirectory, "/vtgate_querylog.txt"),
+ ConfigFile: path.Join(tmpDirectory, fmt.Sprintf("vtgate-config-%d.json", port)),
Directory: os.Getenv("VTDATAROOT"),
ServiceMap: "grpc-tabletmanager,grpc-throttler,grpc-queryservice,grpc-updatestream,grpc-vtctl,grpc-vtgateservice",
LogDir: tmpDirectory,
diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go
index 58fe45547a5..3607beea72a 100644
--- a/go/test/endtoend/transaction/twopc/main_test.go
+++ b/go/test/endtoend/transaction/twopc/main_test.go
@@ -77,7 +77,6 @@ func TestMain(m *testing.M) {
// Set extra args for twopc
clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs,
- "--transaction_mode", "TWOPC",
"--grpc_use_effective_callerid",
)
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs,
@@ -103,6 +102,13 @@ func TestMain(m *testing.M) {
if err := clusterInstance.StartVtgate(); err != nil {
return 1
}
+ clusterInstance.VtgateProcess.Config.TransactionMode = "TWOPC"
+ if err := clusterInstance.VtgateProcess.RewriteConfiguration(); err != nil {
+ return 1
+ }
+ if err := clusterInstance.VtgateProcess.WaitForConfig(`"transaction_mode":"TWOPC"`); err != nil {
+ return 1
+ }
vtParams = clusterInstance.GetVTParams(keyspaceName)
vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort)
diff --git a/go/test/endtoend/transaction/twopc/twopc_test.go b/go/test/endtoend/transaction/twopc/twopc_test.go
index a760cfb24b3..b7f7c11fba9 100644
--- a/go/test/endtoend/transaction/twopc/twopc_test.go
+++ b/go/test/endtoend/transaction/twopc/twopc_test.go
@@ -44,6 +44,38 @@ import (
"vitess.io/vitess/go/vt/vttablet/grpctmclient"
)
+// TestDynamicConfig tests that transaction mode is dynamically configurable.
+func TestDynamicConfig(t *testing.T) {
+ conn, closer := start(t)
+ defer closer()
+ defer conn.Close()
+
+ // Ensure that initially running a distributed transaction is possible.
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "insert into twopc_t1(id, col) values(4, 4)")
+ utils.Exec(t, conn, "insert into twopc_t1(id, col) values(6, 4)")
+ utils.Exec(t, conn, "insert into twopc_t1(id, col) values(9, 4)")
+ utils.Exec(t, conn, "commit")
+
+ clusterInstance.VtgateProcess.Config.TransactionMode = "SINGLE"
+ defer func() {
+ clusterInstance.VtgateProcess.Config.TransactionMode = "TWOPC"
+ err := clusterInstance.VtgateProcess.RewriteConfiguration()
+ require.NoError(t, err)
+ }()
+ err := clusterInstance.VtgateProcess.RewriteConfiguration()
+ require.NoError(t, err)
+ err = clusterInstance.VtgateProcess.WaitForConfig(`"transaction_mode":"SINGLE"`)
+ require.NoError(t, err)
+
+ // After the config change, verify that running a distributed transaction fails.
+ utils.Exec(t, conn, "begin")
+ utils.Exec(t, conn, "insert into twopc_t1(id, col) values(20, 4)")
+ _, err = utils.ExecAllowError(t, conn, "insert into twopc_t1(id, col) values(22, 4)")
+ require.ErrorContains(t, err, "multi-db transaction attempted")
+ utils.Exec(t, conn, "rollback")
+}
+
// TestDTCommit tests distributed transaction commit for insert, update and delete operations
// It verifies the binlog events for the same with transaction state changes and redo statements.
func TestDTCommit(t *testing.T) {
diff --git a/go/test/endtoend/vreplication/lookupindex_helper_test.go b/go/test/endtoend/vreplication/lookup_vindex_helper_test.go
similarity index 59%
rename from go/test/endtoend/vreplication/lookupindex_helper_test.go
rename to go/test/endtoend/vreplication/lookup_vindex_helper_test.go
index 864a5e0f7fc..1c74dadc642 100644
--- a/go/test/endtoend/vreplication/lookupindex_helper_test.go
+++ b/go/test/endtoend/vreplication/lookup_vindex_helper_test.go
@@ -29,7 +29,7 @@ import (
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
-type lookupIndex struct {
+type lookupVindex struct {
typ string
name string
tableKeyspace string
@@ -42,64 +42,64 @@ type lookupIndex struct {
t *testing.T
}
-func (li *lookupIndex) String() string {
- return li.typ + " " + li.name + " on " + li.tableKeyspace + "." + li.table + " (" + li.columns[0] + ")"
+func (lv *lookupVindex) String() string {
+ return lv.typ + " " + lv.name + " on " + lv.tableKeyspace + "." + lv.table + " (" + lv.columns[0] + ")"
}
-func (li *lookupIndex) create() {
- cols := strings.Join(li.columns, ",")
+func (lv *lookupVindex) create() {
+ cols := strings.Join(lv.columns, ",")
args := []string{
"LookupVindex",
- "--name", li.name,
- "--table-keyspace=" + li.ownerTableKeyspace,
+ "--name", lv.name,
+ "--table-keyspace=" + lv.ownerTableKeyspace,
"create",
- "--keyspace=" + li.tableKeyspace,
- "--type=" + li.typ,
- "--table-owner=" + li.ownerTable,
+ "--keyspace=" + lv.tableKeyspace,
+ "--type=" + lv.typ,
+ "--table-owner=" + lv.ownerTable,
"--table-owner-columns=" + cols,
"--tablet-types=PRIMARY",
}
- if li.ignoreNulls {
+ if lv.ignoreNulls {
args = append(args, "--ignore-nulls")
}
err := vc.VtctldClient.ExecuteCommand(args...)
- require.NoError(li.t, err, "error executing LookupVindex create: %v", err)
- waitForWorkflowState(li.t, vc, fmt.Sprintf("%s.%s", li.ownerTableKeyspace, li.name), binlogdatapb.VReplicationWorkflowState_Running.String())
- li.expectWriteOnly(true)
+ require.NoError(lv.t, err, "error executing LookupVindex create: %v", err)
+ waitForWorkflowState(lv.t, vc, fmt.Sprintf("%s.%s", lv.ownerTableKeyspace, lv.name), binlogdatapb.VReplicationWorkflowState_Running.String())
+ lv.expectWriteOnly(true)
}
-func (li *lookupIndex) cancel() {
+func (lv *lookupVindex) cancel() {
panic("not implemented")
}
-func (li *lookupIndex) externalize() {
+func (lv *lookupVindex) externalize() {
args := []string{
"LookupVindex",
- "--name", li.name,
- "--table-keyspace=" + li.ownerTableKeyspace,
+ "--name", lv.name,
+ "--table-keyspace=" + lv.ownerTableKeyspace,
"externalize",
- "--keyspace=" + li.tableKeyspace,
+ "--keyspace=" + lv.tableKeyspace,
}
err := vc.VtctldClient.ExecuteCommand(args...)
- require.NoError(li.t, err, "error executing LookupVindex externalize: %v", err)
- li.expectWriteOnly(false)
+ require.NoError(lv.t, err, "error executing LookupVindex externalize: %v", err)
+ lv.expectWriteOnly(false)
}
-func (li *lookupIndex) show() error {
+func (lv *lookupVindex) show() error {
return nil
}
-func (li *lookupIndex) expectWriteOnly(expected bool) {
- vschema, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", li.ownerTableKeyspace)
- require.NoError(li.t, err, "error executing GetVSchema: %v", err)
- vdx := gjson.Get(vschema, fmt.Sprintf("vindexes.%s", li.name))
- require.NotNil(li.t, vdx, "lookup vindex %s not found", li.name)
+func (lv *lookupVindex) expectWriteOnly(expected bool) {
+ vschema, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", lv.ownerTableKeyspace)
+ require.NoError(lv.t, err, "error executing GetVSchema: %v", err)
+ vdx := gjson.Get(vschema, fmt.Sprintf("vindexes.%s", lv.name))
+ require.NotNil(lv.t, vdx, "lookup vindex %s not found", lv.name)
want := ""
if expected {
want = "true"
}
- require.Equal(li.t, want, vdx.Get("params.write_only").String(), "expected write_only parameter to be %s", want)
+ require.Equal(lv.t, want, vdx.Get("params.write_only").String(), "expected write_only parameter to be %s", want)
}
func getNumRowsInQuery(t *testing.T, query string) int {
diff --git a/go/test/endtoend/vreplication/lookupindex_test.go b/go/test/endtoend/vreplication/lookup_vindex_test.go
similarity index 90%
rename from go/test/endtoend/vreplication/lookupindex_test.go
rename to go/test/endtoend/vreplication/lookup_vindex_test.go
index 348a0ee5906..c0864b26cca 100644
--- a/go/test/endtoend/vreplication/lookupindex_test.go
+++ b/go/test/endtoend/vreplication/lookup_vindex_test.go
@@ -63,7 +63,7 @@ create table t1(
`,
}
-func setupLookupIndexKeyspace(t *testing.T) map[string]*cluster.VttabletProcess {
+func setupLookupVindexKeyspace(t *testing.T) map[string]*cluster.VttabletProcess {
tablets := make(map[string]*cluster.VttabletProcess)
if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, lookupClusterSpec.keyspaceName, "-80,80-",
lookupClusterSpec.vschema, lookupClusterSpec.schema, defaultReplicas, defaultRdonly, 200, nil); err != nil {
@@ -80,14 +80,14 @@ func setupLookupIndexKeyspace(t *testing.T) map[string]*cluster.VttabletProcess
type lookupTestCase struct {
name string
- li *lookupIndex
+ lv *lookupVindex
initQuery string
runningQuery string
postExternalizeQuery string
cleanupQuery string
}
-func TestLookupIndex(t *testing.T) {
+func TestLookupVindex(t *testing.T) {
setSidecarDBName("_vt")
origDefaultReplicas := defaultReplicas
origDefaultRdonly := defaultRdonly
@@ -101,7 +101,7 @@ func TestLookupIndex(t *testing.T) {
defer vc.TearDown()
vttablet.InitVReplicationConfigDefaults()
- _ = setupLookupIndexKeyspace(t)
+ _ = setupLookupVindexKeyspace(t)
initQuery := "insert into t1 (c1, c2, val) values (1, 1, 'val1'), (2, 2, 'val2'), (3, 3, 'val3')"
runningQuery := "insert into t1 (c1, c2, val) values (4, 4, 'val4'), (5, 5, 'val5'), (6, 6, 'val6')"
@@ -111,7 +111,7 @@ func TestLookupIndex(t *testing.T) {
testCases := []lookupTestCase{
{
name: "non-unique lookup index, one column",
- li: &lookupIndex{
+ lv: &lookupVindex{
typ: "consistent_lookup",
name: "t1_c2_lookup",
tableKeyspace: lookupClusterSpec.keyspaceName,
@@ -125,7 +125,7 @@ func TestLookupIndex(t *testing.T) {
},
{
name: "lookup index, two columns",
- li: &lookupIndex{
+ lv: &lookupVindex{
typ: "lookup",
name: "t1_c2_val_lookup",
tableKeyspace: lookupClusterSpec.keyspaceName,
@@ -139,7 +139,7 @@ func TestLookupIndex(t *testing.T) {
},
{
name: "unique lookup index, one column",
- li: &lookupIndex{
+ lv: &lookupVindex{
typ: "lookup_unique",
name: "t1_c2_unique_lookup",
tableKeyspace: lookupClusterSpec.keyspaceName,
@@ -168,7 +168,7 @@ func testLookupVindex(t *testing.T, tc *lookupTestCase) {
vtgateConn, cancel := getVTGateConn()
defer cancel()
var totalRows int
- li := tc.li
+ lv := tc.lv
t.Run("init data", func(t *testing.T) {
totalRows += getNumRowsInQuery(t, tc.initQuery)
@@ -177,28 +177,28 @@ func testLookupVindex(t *testing.T, tc *lookupTestCase) {
})
t.Run("create", func(t *testing.T) {
- tc.li.create()
+ tc.lv.create()
- lks := li.tableKeyspace
- vindexName := li.name
+ lks := lv.tableKeyspace
+ vindexName := lv.name
waitForRowCount(t, vtgateConn, lks, vindexName, totalRows)
totalRows += getNumRowsInQuery(t, tc.runningQuery)
_, err := vtgateConn.ExecuteFetch(tc.runningQuery, 1000, false)
require.NoError(t, err)
- waitForRowCount(t, vtgateConn, tc.li.ownerTableKeyspace, li.name, totalRows)
+ waitForRowCount(t, vtgateConn, tc.lv.ownerTableKeyspace, lv.name, totalRows)
})
t.Run("externalize", func(t *testing.T) {
- tc.li.externalize()
+ tc.lv.externalize()
totalRows += getNumRowsInQuery(t, tc.postExternalizeQuery)
_, err := vtgateConn.ExecuteFetch(tc.postExternalizeQuery, 1000, false)
require.NoError(t, err)
- waitForRowCount(t, vtgateConn, tc.li.ownerTableKeyspace, li.name, totalRows)
+ waitForRowCount(t, vtgateConn, tc.lv.ownerTableKeyspace, lv.name, totalRows)
})
t.Run("cleanup", func(t *testing.T) {
_, err := vtgateConn.ExecuteFetch(tc.cleanupQuery, 1000, false)
require.NoError(t, err)
- waitForRowCount(t, vtgateConn, tc.li.ownerTableKeyspace, li.name, 0)
+ waitForRowCount(t, vtgateConn, tc.lv.ownerTableKeyspace, lv.name, 0)
})
}
diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go
index d3193298a0c..955afde2f18 100644
--- a/go/test/endtoend/vreplication/vreplication_test.go
+++ b/go/test/endtoend/vreplication/vreplication_test.go
@@ -323,8 +323,10 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string
defer func() { defaultReplicas = 1 }()
if binlogRowImage != "" {
- require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir))
- defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir)
+ // Run the e2e test with binlog_row_image=NOBLOB and
+ // binlog_row_value_options=PARTIAL_JSON.
+ require.NoError(t, utils.SetBinlogRowImageOptions("noblob", true, vc.ClusterConfig.tmpDir))
+ defer utils.SetBinlogRowImageOptions("", false, vc.ClusterConfig.tmpDir)
}
defaultCell := vc.Cells[defaultCellName]
@@ -600,8 +602,10 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) {
keyspace := "product"
shard := "0"
- require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir))
- defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir)
+ // Run the e2e test with binlog_row_image=NOBLOB and
+ // binlog_row_value_options=PARTIAL_JSON.
+ require.NoError(t, utils.SetBinlogRowImageOptions("noblob", true, vc.ClusterConfig.tmpDir))
+ defer utils.SetBinlogRowImageOptions("", false, vc.ClusterConfig.tmpDir)
cell1 := vc.Cells["zone1"]
cell2 := vc.Cells["zone2"]
@@ -721,8 +725,18 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl
// Confirm that the 0 scale decimal field, dec80, is replicated correctly
execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0")
execVtgateQuery(t, vtgateConn, sourceKs, "update customer set blb = \"new blob data\" where cid=3")
- execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"'")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"' where id = 5")
execVtgateQuery(t, vtgateConn, sourceKs, "insert into json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')")
+ // Test binlog-row-value-options=PARTIAL_JSON
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.role', 'manager')")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.color', 'red')")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.day', 'wednesday')")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_INSERT(JSON_REPLACE(j3, '$.day', 'friday'), '$.favorite_color', 'black')")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'monday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 3")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_REMOVE(JSON_REPLACE(j3, '$.day', 'tuesday'), '$.favorite_color'), '$.hobby', 'skiing') where id = 4")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(JSON_SET(j3, '$.salary', 110), '$.role', 'IC') where id = 4")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j3 = JSON_SET(j3, '$.misc', '{\"address\":\"1012 S Park St\", \"town\":\"Hastings\", \"state\":\"MI\"}') where id = 1")
+ execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set id=id+1000, j3=JSON_SET(j3, '$.day', 'friday')")
waitForNoWorkflowLag(t, vc, targetKs, workflow)
dec80Replicated := false
for _, tablet := range []*cluster.VttabletProcess{customerTab1, customerTab2} {
diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go
index 666a087c049..88242bc9622 100644
--- a/go/test/endtoend/vtgate/queries/misc/misc_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go
@@ -609,3 +609,23 @@ func TestTimeZones(t *testing.T) {
})
}
}
+
+// TestSemiJoin tests that the semi join works as intended.
+func TestSemiJoin(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ for i := 1; i <= 1000; i++ {
+ mcmp.Exec(fmt.Sprintf("insert into t1(id1, id2) values (%d, %d)", i, 2*i))
+ mcmp.Exec(fmt.Sprintf("insert into tbl(id, unq_col, nonunq_col) values (%d, %d, %d)", i, 2*i, 3*i))
+ }
+
+ // Test that the semi join works as intended
+ for _, mode := range []string{"oltp", "olap"} {
+ mcmp.Run(mode, func(mcmp *utils.MySQLCompare) {
+ utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode))
+
+ mcmp.Exec("select id1, id2 from t1 where exists (select id from tbl where nonunq_col = t1.id2) order by id1")
+ })
+ }
+}
diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
index 823655ed785..419a2e843c3 100644
--- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
+++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go
@@ -165,7 +165,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) {
assert.Empty(t, replicaInstance.LastSQLError)
assert.EqualValues(t, 0, replicaInstance.SQLDelay)
assert.True(t, replicaInstance.UsingOracleGTID)
- assert.False(t, replicaInstance.UsingMariaDBGTID)
assert.Equal(t, replicaInstance.SourceUUID, primaryInstance.ServerUUID)
assert.False(t, replicaInstance.HasReplicationFilters)
assert.LessOrEqual(t, int(replicaInstance.SecondsBehindPrimary.Int64), 1)
diff --git a/go/test/utils/binlog.go b/go/test/utils/binlog.go
index d3f686f1a8a..6e43dd511ad 100644
--- a/go/test/utils/binlog.go
+++ b/go/test/utils/binlog.go
@@ -17,8 +17,10 @@ limitations under the License.
package utils
import (
+ "errors"
"fmt"
"os"
+ "strconv"
"strings"
)
@@ -27,9 +29,12 @@ const (
BinlogRowImageCnf = "binlog-row-image.cnf"
)
-// SetBinlogRowImageMode creates a temp cnf file to set binlog_row_image to noblob for vreplication unit tests.
-// It adds it to the EXTRA_MY_CNF environment variable which appends text from them into my.cnf.
-func SetBinlogRowImageMode(mode string, cnfDir string) error {
+// SetBinlogRowImageOptions creates a temp cnf file to set binlog_row_image=NOBLOB and,
+// optionally, binlog_row_value_options=PARTIAL_JSON (the latter does not exist in
+// MySQL 5.7) for vreplication unit tests.
+// It adds the file to the EXTRA_MY_CNF environment variable, whose listed files are
+// appended to my.cnf.
+func SetBinlogRowImageOptions(mode string, partialJSON bool, cnfDir string) error {
var newCnfs []string
// remove any existing extra cnfs for binlog row image
@@ -55,6 +60,17 @@ func SetBinlogRowImageMode(mode string, cnfDir string) error {
if err != nil {
return err
}
+ if partialJSON {
+ if !CIDBPlatformIsMySQL8orLater() {
+ return errors.New("partial JSON values are only supported in MySQL 8.0 or later")
+ }
+ // We're testing partial binlog row images so let's also test partial
+ // JSON values in the images.
+ _, err = f.WriteString("\nbinlog_row_value_options=PARTIAL_JSON\n")
+ if err != nil {
+ return err
+ }
+ }
err = f.Close()
if err != nil {
return err
@@ -68,3 +84,30 @@ func SetBinlogRowImageMode(mode string, cnfDir string) error {
}
return nil
}
+
+// CIDBPlatformIsMySQL8orLater returns true if the CI_DB_PLATFORM environment
+// variable is either empty -- meaning we're not running in the CI, so we assume
+// MySQL 8.0 or later is in use and any failures can be interpreted and adjusted
+// for locally -- or set to a value reflecting MySQL 8.0 or later. This relies on
+// the current standard values such as mysql57, mysql80, mysql84, etc. It can be
+// used when CI test behavior needs to be altered based on the specific database
+// platform being tested against.
+func CIDBPlatformIsMySQL8orLater() bool {
+ dbPlatform := strings.ToLower(os.Getenv("CI_DB_PLATFORM"))
+ if dbPlatform == "" {
+ // This is for local testing where we don't set the env var via
+ // the CI.
+ return true
+ }
+ if strings.HasPrefix(dbPlatform, "mysql") {
+ _, v, ok := strings.Cut(dbPlatform, "mysql")
+ if ok {
+ // We only want the major version.
+ version, err := strconv.Atoi(string(v[0]))
+ if err == nil && version >= 8 {
+ return true
+ }
+ }
+ }
+ return false
+}
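
The intended usage pattern is a guard at the top of version-sensitive tests. A minimal sketch, assuming a test file that imports go/test/utils (the test name is hypothetical):

    func TestPartialJSONReplication(t *testing.T) {
        if !utils.CIDBPlatformIsMySQL8orLater() {
            t.Skip("binlog_row_value_options=PARTIAL_JSON requires MySQL 8.0 or later")
        }
        // exercise PARTIAL_JSON-dependent behavior here
    }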
diff --git a/go/test/utils/binlog_test.go b/go/test/utils/binlog_test.go
index 593b964a171..d8a0d6d4222 100644
--- a/go/test/utils/binlog_test.go
+++ b/go/test/utils/binlog_test.go
@@ -24,19 +24,33 @@ import (
"github.com/stretchr/testify/require"
)
-// TestSetBinlogRowImageMode tests the SetBinlogRowImageMode function.
+// TestUtils tests the SetBinlogRowImageOptions function.
func TestUtils(t *testing.T) {
tmpDir := "/tmp"
cnfFile := fmt.Sprintf("%s/%s", tmpDir, BinlogRowImageCnf)
+
// Test that setting the mode will create the cnf file and add it to the EXTRA_MY_CNF env var.
- require.NoError(t, SetBinlogRowImageMode("noblob", tmpDir))
+ require.NoError(t, SetBinlogRowImageOptions("noblob", false, tmpDir))
data, err := os.ReadFile(cnfFile)
require.NoError(t, err)
require.Contains(t, string(data), "binlog_row_image=noblob")
require.Contains(t, os.Getenv(ExtraCnf), BinlogRowImageCnf)
+	// Test that setting the mode and passing true for partialJSON sets both
+	// options as expected.
+ if CIDBPlatformIsMySQL8orLater() {
+ require.NoError(t, SetBinlogRowImageOptions("noblob", true, tmpDir))
+ data, err = os.ReadFile(cnfFile)
+ require.NoError(t, err)
+ require.Contains(t, string(data), "binlog_row_image=noblob")
+ require.Contains(t, string(data), "binlog_row_value_options=PARTIAL_JSON")
+ require.Contains(t, os.Getenv(ExtraCnf), BinlogRowImageCnf)
+ } else {
+ require.Error(t, SetBinlogRowImageOptions("noblob", true, tmpDir))
+ }
+
// Test that clearing the mode will remove the cnf file and the cnf from the EXTRA_MY_CNF env var.
- require.NoError(t, SetBinlogRowImageMode("", tmpDir))
+ require.NoError(t, SetBinlogRowImageOptions("", false, tmpDir))
require.NotContains(t, os.Getenv(ExtraCnf), BinlogRowImageCnf)
_, err = os.Stat(cnfFile)
require.True(t, os.IsNotExist(err))
diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go
index d62fcc3a915..08e06ec803c 100644
--- a/go/vt/binlog/binlog_streamer.go
+++ b/go/vt/binlog/binlog_streamer.go
@@ -760,7 +760,7 @@ func writeValuesAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, rs *my
}
// We have real data.
- value, l, err := binlog.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type})
+ value, l, err := binlog.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type}, false)
if err != nil {
return keyspaceIDCell, nil, err
}
@@ -825,7 +825,7 @@ func writeIdentifiersAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, r
sql.WriteByte('=')
// We have real data.
- value, l, err := binlog.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type})
+ value, l, err := binlog.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type}, false)
if err != nil {
return keyspaceIDCell, nil, err
}
diff --git a/go/vt/mysqlctl/s3backupstorage/s3.go b/go/vt/mysqlctl/s3backupstorage/s3.go
index 97861e83729..4dd583009aa 100644
--- a/go/vt/mysqlctl/s3backupstorage/s3.go
+++ b/go/vt/mysqlctl/s3backupstorage/s3.go
@@ -47,6 +47,7 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3/types"
transport "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
+ "github.com/dustin/go-humanize"
"github.com/spf13/pflag"
errorsbackup "vitess.io/vitess/go/vt/mysqlctl/errors"
@@ -57,6 +58,11 @@ import (
"vitess.io/vitess/go/vt/servenv"
)
+const (
+ sseCustomerPrefix = "sse_c:"
+ MaxPartSize = 1024 * 1024 * 1024 * 5 // 5GiB - limited by AWS https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+)
+
var (
// AWS API region
region string
@@ -86,6 +92,11 @@ var (
// path component delimiter
delimiter = "/"
+
+ // minimum part size
+ minPartSize int64
+
+ ErrPartSize = errors.New("minimum S3 part size must be between 5MiB and 5GiB")
)
func registerFlags(fs *pflag.FlagSet) {
@@ -98,6 +109,7 @@ func registerFlags(fs *pflag.FlagSet) {
fs.BoolVar(&tlsSkipVerifyCert, "s3_backup_tls_skip_verify_cert", false, "skip the 'certificate is valid' check for SSL connections.")
fs.StringVar(&requiredLogLevel, "s3_backup_log_level", "LogOff", "determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors.")
fs.StringVar(&sse, "s3_backup_server_side_encryption", "", "server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file).")
+	fs.Int64Var(&minPartSize, "s3_backup_aws_min_partsize", manager.MinUploadPartSize, "Minimum part size to use; defaults to 5MiB but can be increased depending on the dataset size.")
}
func init() {
@@ -111,8 +123,6 @@ type logNameToLogLevel map[string]aws.ClientLogMode
var logNameMap logNameToLogLevel
-const sseCustomerPrefix = "sse_c:"
-
type endpointResolver struct {
r s3.EndpointResolverV2
endpoint *string
@@ -166,7 +176,12 @@ func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize
return nil, fmt.Errorf("AddFile cannot be called on read-only backup")
}
- partSizeBytes := calculateUploadPartSize(filesize)
+ partSizeBytes, err := calculateUploadPartSize(filesize)
+ if err != nil {
+ return nil, err
+ }
+
+ bh.bs.params.Logger.Infof("Using S3 upload part size: %s", humanize.IBytes(uint64(partSizeBytes)))
reader, writer := io.Pipe()
bh.handleAddFile(ctx, filename, partSizeBytes, reader, func(err error) {
@@ -213,9 +228,11 @@ func (bh *S3BackupHandle) handleAddFile(ctx context.Context, filename string, pa
}()
}
-func calculateUploadPartSize(filesize int64) int64 {
+// calculateUploadPartSize is a helper to calculate the part size, taking into consideration the minimum part size
+// passed in by an operator.
+func calculateUploadPartSize(filesize int64) (partSizeBytes int64, err error) {
// Calculate s3 upload part size using the source filesize
- partSizeBytes := manager.DefaultUploadPartSize
+ partSizeBytes = manager.DefaultUploadPartSize
if filesize > 0 {
minimumPartSize := float64(filesize) / float64(manager.MaxUploadParts)
// Round up to ensure large enough partsize
@@ -224,7 +241,17 @@ func calculateUploadPartSize(filesize int64) int64 {
partSizeBytes = calculatedPartSizeBytes
}
}
- return partSizeBytes
+
+ if minPartSize != 0 && partSizeBytes < minPartSize {
+ if minPartSize > MaxPartSize || minPartSize < manager.MinUploadPartSize { // 5GiB and 5MiB respectively
+ return 0, fmt.Errorf("%w, currently set to %s",
+ ErrPartSize, humanize.IBytes(uint64(minPartSize)),
+ )
+ }
+		partSizeBytes = minPartSize
+ }
+
+ return
}
// EndBackup is part of the backupstorage.BackupHandle interface.
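
To see the new minimum in action, consider a 1 TiB backup file; a minimal sketch in test context, assigning the package-level minPartSize directly (in production it is set via the --s3_backup_aws_min_partsize flag):

    minPartSize = 1024 * 1024 * 200 // 200 MiB operator minimum
    partSize, err := calculateUploadPartSize(1024 * 1024 * 1024 * 1024) // 1 TiB
    require.NoError(t, err)
    require.EqualValues(t, 1024*1024*200, partSize)
    // The size computed from filesize/manager.MaxUploadParts (~105 MiB)
    // is below the operator minimum, so it is raised to 200 MiB.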
diff --git a/go/vt/mysqlctl/s3backupstorage/s3_mock.go b/go/vt/mysqlctl/s3backupstorage/s3_mock.go
index f244c4d63b1..910a22bd9d5 100644
--- a/go/vt/mysqlctl/s3backupstorage/s3_mock.go
+++ b/go/vt/mysqlctl/s3backupstorage/s3_mock.go
@@ -162,7 +162,11 @@ func FailFirstWrite(s3bh *S3BackupHandle, ctx context.Context, filename string,
return nil, fmt.Errorf("AddFile cannot be called on read-only backup")
}
- partSizeBytes := calculateUploadPartSize(filesize)
+ partSizeBytes, err := calculateUploadPartSize(filesize)
+ if err != nil {
+ return nil, err
+ }
+
reader, writer := io.Pipe()
r := io.Reader(reader)
@@ -181,7 +185,11 @@ func FailAllWrites(s3bh *S3BackupHandle, ctx context.Context, filename string, f
return nil, fmt.Errorf("AddFile cannot be called on read-only backup")
}
- partSizeBytes := calculateUploadPartSize(filesize)
+ partSizeBytes, err := calculateUploadPartSize(filesize)
+ if err != nil {
+ return nil, err
+ }
+
reader, writer := io.Pipe()
r := &failReadPipeReader{PipeReader: reader}
diff --git a/go/vt/mysqlctl/s3backupstorage/s3_test.go b/go/vt/mysqlctl/s3backupstorage/s3_test.go
index 84ef8de6e48..5e9364219af 100644
--- a/go/vt/mysqlctl/s3backupstorage/s3_test.go
+++ b/go/vt/mysqlctl/s3backupstorage/s3_test.go
@@ -328,3 +328,68 @@ func TestWithParams(t *testing.T) {
assert.NotNil(t, s3.transport.DialContext)
assert.NotNil(t, s3.transport.Proxy)
}
+
+func TestCalculateUploadPartSize(t *testing.T) {
+ originalMinimum := minPartSize
+ defer func() { minPartSize = originalMinimum }()
+
+ tests := []struct {
+ name string
+ filesize int64
+ minimumPartSize int64
+ want int64
+ err error
+ }{
+ {
+ name: "minimum - 10 MiB",
+ filesize: 1024 * 1024 * 10, // 10 MiB
+ minimumPartSize: 1024 * 1024 * 5, // 5 MiB
+			want:            1024 * 1024 * 5, // 5 MiB
+ err: nil,
+ },
+ {
+ name: "below minimum - 10 MiB",
+ filesize: 1024 * 1024 * 10, // 10 MiB
+ minimumPartSize: 1024 * 1024 * 8, // 8 MiB
+			want:            1024 * 1024 * 8, // 8 MiB
+ err: nil,
+ },
+ {
+ name: "above minimum - 1 TiB",
+ filesize: 1024 * 1024 * 1024 * 1024, // 1 TiB
+ minimumPartSize: 1024 * 1024 * 5, // 5 MiB
+ want: 109951163, // ~104 MiB
+ err: nil,
+ },
+ {
+ name: "below minimum - 1 TiB",
+ filesize: 1024 * 1024 * 1024 * 1024, // 1 TiB
+ minimumPartSize: 1024 * 1024 * 200, // 200 MiB
+ want: 1024 * 1024 * 200, // 200 MiB
+ err: nil,
+ },
+ {
+ name: "below S3 limits - 5 MiB",
+ filesize: 1024 * 1024 * 3, // 3 MiB
+ minimumPartSize: 1024 * 1024 * 4, // 4 MiB
+			want:            1024 * 1024 * 5, // 5 MiB - never drops below the S3 minimum part size
+ err: nil,
+ },
+ {
+ name: "above S3 limits - 5 GiB",
+ filesize: 1024 * 1024 * 1024 * 1024, // 1 TiB
+ minimumPartSize: 1024 * 1024 * 1024 * 6, // 6 GiB
+ want: 0,
+ err: ErrPartSize,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ minPartSize = tt.minimumPartSize
+ partSize, err := calculateUploadPartSize(tt.filesize)
+ require.ErrorIs(t, err, tt.err)
+ require.Equal(t, tt.want, partSize)
+ })
+ }
+}
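
The 109951163 expectation in the 1 TiB cases falls straight out of the sizing rule; a standalone sketch, assuming manager.MaxUploadParts is 10000 as in the AWS SDK:

    const maxUploadParts = 10000 // manager.MaxUploadParts
    partSize := int64(math.Ceil(float64(1<<40) / float64(maxUploadParts)))
    fmt.Println(partSize) // 109951163 (~104.9 MiB): 1099511627776/10000, rounded up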
diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go
index 35e738f772e..e3523b6b384 100644
--- a/go/vt/proto/binlogdata/binlogdata.pb.go
+++ b/go/vt/proto/binlogdata/binlogdata.pb.go
@@ -1333,8 +1333,17 @@ type RowChange struct {
Before *query.Row `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"`
After *query.Row `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"`
- // DataColumns is a bitmap of all columns: bit is set if column is present in the after image
+ // DataColumns is a bitmap of all columns: bit is set if column is
+ // present in the after image.
DataColumns *RowChange_Bitmap `protobuf:"bytes,3,opt,name=data_columns,json=dataColumns,proto3" json:"data_columns,omitempty"`
+	// JsonPartialValues is a bitmap of any JSON columns, where the bit
+	// is set if the value in the AFTER image is a partial JSON value
+	// represented as an expression of the form
+	// JSON_[INSERT|REPLACE|REMOVE](%s, '$.path', value), which is then
+	// used to add/update/remove a path in the JSON document. When the
+	// value is used, the %s format directive must be replaced by the
+	// actual column name of the JSON field.
+ JsonPartialValues *RowChange_Bitmap `protobuf:"bytes,4,opt,name=json_partial_values,json=jsonPartialValues,proto3" json:"json_partial_values,omitempty"`
}
func (x *RowChange) Reset() {
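
How a consumer of JsonPartialValues might turn a stored partial value into executable SQL; a minimal sketch (expandPartialJSON is a hypothetical helper, not part of this change):

    // The stored expression carries a single %s directive for the column
    // name, e.g. "JSON_INSERT(%s, '$.role', 'manager')".
    func expandPartialJSON(colName, partialExpr string) string {
        return fmt.Sprintf(partialExpr, colName)
    }
    // expandPartialJSON("j3", "JSON_REMOVE(%s, '$.favorite_color')")
    // yields "JSON_REMOVE(j3, '$.favorite_color')".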
@@ -1388,6 +1397,13 @@ func (x *RowChange) GetDataColumns() *RowChange_Bitmap {
return nil
}
+func (x *RowChange) GetJsonPartialValues() *RowChange_Bitmap {
+ if x != nil {
+ return x.JsonPartialValues
+ }
+ return nil
+}
+
// RowEvent represent row events for one table.
type RowEvent struct {
state protoimpl.MessageState
@@ -3223,7 +3239,7 @@ var file_binlogdata_proto_rawDesc = []byte{
0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f,
0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22,
- 0xc6, 0x01, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a,
+ 0x94, 0x02, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a,
0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e,
0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72,
0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
@@ -3232,313 +3248,318 @@ var file_binlogdata_proto_rawDesc = []byte{
0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x69, 0x6e, 0x6c,
0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65,
0x2e, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c,
- 0x75, 0x6d, 0x6e, 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14,
- 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e,
- 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c,
- 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65,
- 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72,
- 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14,
- 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66,
- 0x6c, 0x61, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0f, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
- 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24,
- 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c,
- 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x73,
- 0x65, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73,
- 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x74, 0x53,
- 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x69,
- 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e,
- 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50,
- 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74,
- 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e,
- 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
- 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e,
- 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e,
- 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69,
- 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
- 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52,
- 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b,
- 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b,
- 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16,
+ 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x13, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52,
+ 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x52,
+ 0x11, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a,
+ 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65,
+ 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05,
+ 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61,
+ 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69,
+ 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xe4,
+ 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x73, 0x65, 0x74,
+ 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x19,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x74, 0x53, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x69, 0x73, 0x5f,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x1a,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47,
+ 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
+ 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
+ 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73,
+ 0x22, 0x3f, 0x0a, 0x05, 0x56, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64,
+ 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x22, 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c,
+ 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64,
+ 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
+ 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x0d, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73,
+ 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74,
+ 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c,
+ 0x6f, 0x77, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a,
+ 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62,
+ 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09,
+ 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f,
+ 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a,
+ 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12,
+ 0x2d, 0x0a, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x13, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f,
+ 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10,
+ 0x0a, 0x03, 0x64, 0x6d, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c,
+ 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69,
+ 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a,
+ 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64,
+ 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65,
+ 0x64, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x68, 0x72,
+ 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8d, 0x01, 0x0a,
+ 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52,
+ 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63,
+ 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b,
+ 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x5f, 0x6b, 0x5f, 0x69,
+ 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x70, 0x4b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41, 0x0a, 0x0d,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a,
+ 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e,
+ 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d,
+ 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22,
+ 0xd9, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74,
+ 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x10, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76,
+ 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfd, 0x02, 0x0a, 0x0e,
+ 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f,
+ 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66,
+ 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12,
+ 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a,
+ 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c,
+ 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c,
+ 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17,
+ 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c,
+ 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61,
+ 0x73, 0x74, 0x50, 0x4b, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a,
+ 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12,
0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65,
- 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09,
- 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74,
- 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c,
- 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09,
- 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
- 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74,
- 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69,
- 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69,
- 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e,
- 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c,
- 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
- 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
- 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f,
- 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
- 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50,
- 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18,
- 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c,
- 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74,
- 0x6c, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64,
- 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74,
- 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8d,
- 0x01, 0x0a, 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b,
- 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09,
- 0x70, 0x4b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x5f, 0x6b,
- 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0b, 0x70, 0x4b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41,
- 0x0a, 0x0d, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
- 0x30, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e,
- 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x22, 0xd9, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
- 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x5a, 0x0a,
- 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69,
- 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
- 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfd, 0x02,
- 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
- 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
- 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65,
- 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12,
- 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66,
- 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69,
- 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52,
- 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61,
- 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65,
- 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
+ 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbb, 0x02, 0x0a, 0x12, 0x56,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
+ 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f,
+ 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
+ 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72,
+ 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74,
+ 0x70, 0x6b, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa4, 0x02, 0x0a, 0x13, 0x56, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73,
+ 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04,
+ 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77,
+ 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f,
+ 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72,
+ 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62,
+ 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74,
+ 0x62, 0x65, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65,
+ 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
+ 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22,
+ 0xfb, 0x01, 0x0a, 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61,
+ 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d,
+ 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56,
+ 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69,
+ 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
+ 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
+ 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f,
0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3d, 0x0a,
- 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbb, 0x02, 0x0a,
- 0x12, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
- 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49,
- 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74,
- 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65,
- 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69,
- 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74,
- 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61,
- 0x73, 0x74, 0x70, 0x6b, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
- 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa4, 0x02, 0x0a, 0x13, 0x56,
- 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xde, 0x01,
+ 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08,
+ 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c,
+ 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f,
+ 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61,
+ 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0x69,
+ 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a,
+ 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
+ 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b,
+ 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63,
+ 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09,
+ 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62,
+ 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c,
+ 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70,
+ 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73,
+ 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a,
+ 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65,
+ 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72,
+ 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45,
+ 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c,
+ 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75,
+ 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
+ 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c,
+ 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05,
+ 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71,
+ 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77,
- 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52,
- 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68,
- 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74,
- 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72,
- 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61,
- 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74,
- 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f,
- 0x6e, 0x22, 0xfb, 0x01, 0x0a, 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e,
- 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74,
- 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69,
- 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52,
- 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72,
- 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e,
- 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
- 0xde, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62,
- 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74,
- 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28,
- 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08,
- 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04,
- 0x72, 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65,
- 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06,
- 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b,
- 0x22, 0x69, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
- 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f,
- 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67,
- 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b,
- 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a,
- 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54,
- 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61,
- 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
- 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73,
- 0x74, 0x70, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c,
- 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61,
- 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c,
- 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76,
- 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65,
- 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64,
- 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61,
- 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c,
- 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43,
- 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14,
- 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24,
- 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c,
- 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52,
- 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44,
- 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52,
- 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a,
- 0x04, 0x45, 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f,
- 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c,
- 0x69, 0x7a, 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62,
- 0x6c, 0x65, 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c,
- 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07,
- 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73,
- 0x68, 0x61, 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65,
- 0x44, 0x44, 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b,
- 0x0a, 0x07, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41,
- 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, 0x71, 0x0a, 0x19, 0x56,
- 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66,
- 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e,
- 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x01, 0x12,
- 0x0b, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07,
- 0x43, 0x6f, 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10,
- 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x10, 0x06, 0x2a, 0x8d,
- 0x02, 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
- 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54,
- 0x49, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12,
- 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52,
- 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c,
- 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b,
- 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55,
- 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54,
- 0x45, 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05,
- 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c,
- 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48,
- 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47,
- 0x54, 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c,
- 0x10, 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12,
- 0x0a, 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53,
- 0x41, 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f,
- 0x50, 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27,
- 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53,
- 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73,
- 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76,
- 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45,
+ 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47,
+ 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61,
+ 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44,
+ 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07,
+ 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x74, 0x6f,
+ 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, 0x71, 0x0a, 0x19, 0x56, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
+ 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x01, 0x12, 0x0b, 0x0a,
+ 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f,
+ 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69,
+ 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x05, 0x12,
+ 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x10, 0x06, 0x2a, 0x8d, 0x02, 0x0a,
+ 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55,
+ 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44,
+ 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a,
+ 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c,
+ 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05,
+ 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07,
+ 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44,
+ 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10,
+ 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54,
+ 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09,
+ 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41,
+ 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49,
+ 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10,
+ 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a,
+ 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56,
+ 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59,
+ 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d,
+ 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a,
+ 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41,
+ 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e,
+ 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -3631,61 +3652,62 @@ var file_binlogdata_proto_depIdxs = []int32{
48, // 16: binlogdata.RowChange.before:type_name -> query.Row
48, // 17: binlogdata.RowChange.after:type_name -> query.Row
43, // 18: binlogdata.RowChange.data_columns:type_name -> binlogdata.RowChange.Bitmap
- 18, // 19: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange
- 49, // 20: binlogdata.FieldEvent.fields:type_name -> query.Field
- 36, // 21: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK
- 21, // 22: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid
- 5, // 23: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType
- 21, // 24: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid
- 23, // 25: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard
- 4, // 26: binlogdata.VEvent.type:type_name -> binlogdata.VEventType
- 19, // 27: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent
- 20, // 28: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent
- 22, // 29: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid
- 24, // 30: binlogdata.VEvent.journal:type_name -> binlogdata.Journal
- 35, // 31: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent
- 49, // 32: binlogdata.MinimalTable.fields:type_name -> query.Field
- 26, // 33: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable
- 44, // 34: binlogdata.VStreamOptions.config_overrides:type_name -> binlogdata.VStreamOptions.ConfigOverridesEntry
- 50, // 35: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 51, // 36: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 52, // 37: binlogdata.VStreamRequest.target:type_name -> query.Target
- 16, // 38: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter
- 36, // 39: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK
- 28, // 40: binlogdata.VStreamRequest.options:type_name -> binlogdata.VStreamOptions
- 25, // 41: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent
- 50, // 42: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 51, // 43: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 52, // 44: binlogdata.VStreamRowsRequest.target:type_name -> query.Target
- 53, // 45: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult
- 28, // 46: binlogdata.VStreamRowsRequest.options:type_name -> binlogdata.VStreamOptions
- 49, // 47: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field
- 49, // 48: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field
- 48, // 49: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row
- 48, // 50: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row
- 50, // 51: binlogdata.VStreamTablesRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 51, // 52: binlogdata.VStreamTablesRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 52, // 53: binlogdata.VStreamTablesRequest.target:type_name -> query.Target
- 28, // 54: binlogdata.VStreamTablesRequest.options:type_name -> binlogdata.VStreamOptions
- 49, // 55: binlogdata.VStreamTablesResponse.fields:type_name -> query.Field
- 49, // 56: binlogdata.VStreamTablesResponse.pkfields:type_name -> query.Field
- 48, // 57: binlogdata.VStreamTablesResponse.rows:type_name -> query.Row
- 48, // 58: binlogdata.VStreamTablesResponse.lastpk:type_name -> query.Row
- 36, // 59: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK
- 53, // 60: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult
- 50, // 61: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID
- 51, // 62: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
- 52, // 63: binlogdata.VStreamResultsRequest.target:type_name -> query.Target
- 49, // 64: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field
- 48, // 65: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row
- 6, // 66: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category
- 8, // 67: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset
- 14, // 68: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion
- 69, // [69:69] is the sub-list for method output_type
- 69, // [69:69] is the sub-list for method input_type
- 69, // [69:69] is the sub-list for extension type_name
- 69, // [69:69] is the sub-list for extension extendee
- 0, // [0:69] is the sub-list for field type_name
+ 43, // 19: binlogdata.RowChange.json_partial_values:type_name -> binlogdata.RowChange.Bitmap
+ 18, // 20: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange
+ 49, // 21: binlogdata.FieldEvent.fields:type_name -> query.Field
+ 36, // 22: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK
+ 21, // 23: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid
+ 5, // 24: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType
+ 21, // 25: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid
+ 23, // 26: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard
+ 4, // 27: binlogdata.VEvent.type:type_name -> binlogdata.VEventType
+ 19, // 28: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent
+ 20, // 29: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent
+ 22, // 30: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid
+ 24, // 31: binlogdata.VEvent.journal:type_name -> binlogdata.Journal
+ 35, // 32: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent
+ 49, // 33: binlogdata.MinimalTable.fields:type_name -> query.Field
+ 26, // 34: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable
+ 44, // 35: binlogdata.VStreamOptions.config_overrides:type_name -> binlogdata.VStreamOptions.ConfigOverridesEntry
+ 50, // 36: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 51, // 37: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 52, // 38: binlogdata.VStreamRequest.target:type_name -> query.Target
+ 16, // 39: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter
+ 36, // 40: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK
+ 28, // 41: binlogdata.VStreamRequest.options:type_name -> binlogdata.VStreamOptions
+ 25, // 42: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent
+ 50, // 43: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 51, // 44: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 52, // 45: binlogdata.VStreamRowsRequest.target:type_name -> query.Target
+ 53, // 46: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult
+ 28, // 47: binlogdata.VStreamRowsRequest.options:type_name -> binlogdata.VStreamOptions
+ 49, // 48: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field
+ 49, // 49: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field
+ 48, // 50: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row
+ 48, // 51: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row
+ 50, // 52: binlogdata.VStreamTablesRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 51, // 53: binlogdata.VStreamTablesRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 52, // 54: binlogdata.VStreamTablesRequest.target:type_name -> query.Target
+ 28, // 55: binlogdata.VStreamTablesRequest.options:type_name -> binlogdata.VStreamOptions
+ 49, // 56: binlogdata.VStreamTablesResponse.fields:type_name -> query.Field
+ 49, // 57: binlogdata.VStreamTablesResponse.pkfields:type_name -> query.Field
+ 48, // 58: binlogdata.VStreamTablesResponse.rows:type_name -> query.Row
+ 48, // 59: binlogdata.VStreamTablesResponse.lastpk:type_name -> query.Row
+ 36, // 60: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK
+ 53, // 61: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult
+ 50, // 62: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID
+ 51, // 63: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID
+ 52, // 64: binlogdata.VStreamResultsRequest.target:type_name -> query.Target
+ 49, // 65: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field
+ 48, // 66: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row
+ 6, // 67: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category
+ 8, // 68: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset
+ 14, // 69: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion
+ 70, // [70:70] is the sub-list for method output_type
+ 70, // [70:70] is the sub-list for method input_type
+ 70, // [70:70] is the sub-list for extension type_name
+ 70, // [70:70] is the sub-list for extension extendee
+ 0, // [0:70] is the sub-list for field type_name
}
func init() { file_binlogdata_proto_init() }
diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go
index 98fba617973..93b378738dd 100644
--- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go
+++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go
@@ -314,6 +314,7 @@ func (m *RowChange) CloneVT() *RowChange {
r.Before = m.Before.CloneVT()
r.After = m.After.CloneVT()
r.DataColumns = m.DataColumns.CloneVT()
+ r.JsonPartialValues = m.JsonPartialValues.CloneVT()
if len(m.unknownFields) > 0 {
r.unknownFields = make([]byte, len(m.unknownFields))
copy(r.unknownFields, m.unknownFields)
@@ -1675,6 +1676,16 @@ func (m *RowChange) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
i -= len(m.unknownFields)
copy(dAtA[i:], m.unknownFields)
}
+ if m.JsonPartialValues != nil {
+ size, err := m.JsonPartialValues.MarshalToSizedBufferVT(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
if m.DataColumns != nil {
size, err := m.DataColumns.MarshalToSizedBufferVT(dAtA[:i])
if err != nil {
@@ -3636,6 +3647,10 @@ func (m *RowChange) SizeVT() (n int) {
l = m.DataColumns.SizeVT()
n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
}
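+ // The new field contributes one tag byte, a varint length prefix, and
+ // the encoded bitmap itself.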
+ if m.JsonPartialValues != nil {
+ l = m.JsonPartialValues.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
n += len(m.unknownFields)
return n
}
@@ -6619,6 +6634,42 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field JsonPartialValues", wireType)
+ }
+ var msglen int
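+ // Decode the length prefix as a base-128 varint: each byte carries
+ // seven payload bits and the high bit marks continuation.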
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return protohelpers.ErrIntOverflow
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return protohelpers.ErrInvalidLength
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return protohelpers.ErrInvalidLength
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.JsonPartialValues == nil {
+ m.JsonPartialValues = &RowChange_Bitmap{}
+ }
+ if err := m.JsonPartialValues.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := protohelpers.Skip(dAtA[iNdEx:])
diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go
index 4f91459d9ed..a54090bb044 100644
--- a/go/vt/vtadmin/api.go
+++ b/go/vt/vtadmin/api.go
@@ -1785,7 +1785,7 @@ func (api *API) VDiffShow(ctx context.Context, req *vtadminpb.VDiffShowRequest)
}
}
if report.State == string(vdiff.StartedState) {
- progress := vdiffcmd.BuildProgressReport(report.RowsCompared, totalRowsToCompare, report.StartedAt)
+ progress := workflow.BuildProgressReport(report.RowsCompared, totalRowsToCompare, report.StartedAt)
report.Progress = &vtadminpb.VDiffProgress{
Percentage: progress.Percentage,
Eta: progress.ETA,
diff --git a/go/vt/vtctl/workflow/lookup_vindex.go b/go/vt/vtctl/workflow/lookup_vindex.go
new file mode 100644
index 00000000000..cf9b4833c28
--- /dev/null
+++ b/go/vt/vtctl/workflow/lookup_vindex.go
@@ -0,0 +1,542 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+
+ "golang.org/x/exp/maps"
+ "google.golang.org/protobuf/proto"
+
+ "vitess.io/vitess/go/sqlescape"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/schema"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/vtctl/schematools"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+// lookupVindex is responsible for performing actions related to lookup vindexes.
+type lookupVindex struct {
+ ts *topo.Server
+ tmc tmclient.TabletManagerClient
+
+ logger logutil.Logger
+ parser *sqlparser.Parser
+}
+
+// newLookupVindex creates a new lookupVindex instance which is responsible
+// for performing actions related to lookup vindexes.
+func newLookupVindex(ws *Server) *lookupVindex {
+ return &lookupVindex{
+ ts: ws.ts,
+ tmc: ws.tmc,
+ logger: ws.Logger(),
+ parser: ws.SQLParser(),
+ }
+}
+
+// prepareCreate performs the preparatory steps for creating a Lookup Vindex.
+func (lv *lookupVindex) prepareCreate(ctx context.Context, workflow, keyspace string, specs *vschemapb.Keyspace, continueAfterCopyWithOwner bool) (
+ ms *vtctldatapb.MaterializeSettings, sourceVSchema, targetVSchema *vschemapb.Keyspace, cancelFunc func() error, err error) {
+ var (
+ // sourceVSchemaTable is the table info present in the vschema.
+ sourceVSchemaTable *vschemapb.Table
+ // sourceVindexColumns are computed from the input sourceTable.
+ sourceVindexColumns []string
+
+ // Target table info.
+ createDDL string
+ materializeQuery string
+ )
+
+ // Validate input vindex.
+ vindex, vInfo, err := lv.validateAndGetVindex(specs)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ vInfo.sourceTable, vInfo.sourceTableName, err = getSourceTable(specs, vInfo.targetTableName, vInfo.fromCols)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ sourceVindexColumns, err = validateSourceTableAndGetVindexColumns(vInfo, vindex, keyspace)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ sourceVSchema, targetVSchema, err = lv.getTargetAndSourceVSchema(ctx, keyspace, vInfo.targetKeyspace)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ if existing, ok := sourceVSchema.Vindexes[vInfo.name]; ok {
+ if !proto.Equal(existing, vindex) { // Re-use an identical existing vindex; anything else is a conflict.
+ return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "a conflicting vindex named %s already exists in the %s keyspace",
+ vInfo.name, keyspace)
+ }
+ }
+
+ sourceVSchemaTable = sourceVSchema.Tables[vInfo.sourceTableName]
+ if sourceVSchemaTable == nil && !schema.IsInternalOperationTableName(vInfo.sourceTableName) {
+ return nil, nil, nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %s not found in the %s keyspace", vInfo.sourceTableName, keyspace)
+ }
+ if err := validateNonConflictingColumnVindex(sourceVSchemaTable, vInfo, sourceVindexColumns, keyspace); err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ // Validate against source schema.
+ sourceShards, err := lv.ts.GetServingShards(ctx, keyspace)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ onesource := sourceShards[0]
+ if onesource.PrimaryAlias == nil {
+ return nil, nil, nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INTERNAL, "source shard %s has no primary", onesource.ShardName())
+ }
+
+ req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{vInfo.sourceTableName}}
+ tableSchema, err := schematools.GetSchema(ctx, lv.ts, lv.tmc, onesource.PrimaryAlias, req)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ if len(tableSchema.TableDefinitions) != 1 {
+ return nil, nil, nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of tables (%d) returned from %s schema",
+ len(tableSchema.TableDefinitions), keyspace)
+ }
+
+ // Generate "create table" statement.
+ createDDL, err = lv.generateCreateDDLStatement(tableSchema, sourceVindexColumns, vInfo, vindex)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ // Generate vreplication query.
+ materializeQuery = generateMaterializeQuery(vInfo, vindex, sourceVindexColumns)
+
+ // Save a copy of the original vschema if we modify it and need to provide
+ // a cancelFunc.
+ ogTargetVSchema := targetVSchema.CloneVT()
+ targetChanged := false
+
+ // Update targetVSchema.
+ targetTable := specs.Tables[vInfo.targetTableName]
+ if targetVSchema.Sharded {
+ // Choose a primary vindex type for the lookup table based on the source
+ // definition if one was not explicitly specified.
+ var targetVindexType string
+ var targetVindex *vschemapb.Vindex
+ for _, field := range tableSchema.TableDefinitions[0].Fields {
+ if sourceVindexColumns[0] == field.Name {
+ if targetTable != nil && len(targetTable.ColumnVindexes) > 0 {
+ targetVindexType = targetTable.ColumnVindexes[0].Name
+ }
+ if targetVindexType == "" {
+ targetVindexType, err = vindexes.ChooseVindexForType(field.Type)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+ }
+ targetVindex = &vschemapb.Vindex{
+ Type: targetVindexType,
+ }
+ break
+ }
+ }
+ if targetVindex == nil {
+ // Unreachable. We validated column names when generating the DDL.
+ return nil, nil, nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INTERNAL, "column %s not found in target schema %s",
+ sourceVindexColumns[0], tableSchema.TableDefinitions[0].Schema)
+ }
+
+ if existing, ok := targetVSchema.Vindexes[targetVindexType]; ok {
+ if !proto.Equal(existing, targetVindex) {
+ return nil, nil, nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting vindex named %v already exists in the %s keyspace",
+ targetVindexType, vInfo.targetKeyspace)
+ }
+ } else {
+ targetVSchema.Vindexes[targetVindexType] = targetVindex
+ targetChanged = true
+ }
+
+ targetTable = &vschemapb.Table{
+ ColumnVindexes: []*vschemapb.ColumnVindex{{
+ Column: vInfo.fromCols[0],
+ Name: targetVindexType,
+ }},
+ }
+ } else {
+ targetTable = &vschemapb.Table{}
+ }
+ if existing, ok := targetVSchema.Tables[vInfo.targetTableName]; ok {
+ if !proto.Equal(existing, targetTable) {
+ return nil, nil, nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting table named %s already exists in the %s vschema",
+ vInfo.targetTableName, vInfo.targetKeyspace)
+ }
+ } else {
+ targetVSchema.Tables[vInfo.targetTableName] = targetTable
+ targetChanged = true
+ }
+
+ if targetChanged {
+ cancelFunc = func() error {
+ // Restore the original target vschema.
+ return lv.ts.SaveVSchema(ctx, vInfo.targetKeyspace, ogTargetVSchema)
+ }
+ }
+
+ ms = &vtctldatapb.MaterializeSettings{
+ Workflow: workflow,
+ MaterializationIntent: vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX,
+ SourceKeyspace: keyspace,
+ TargetKeyspace: vInfo.targetKeyspace,
+ StopAfterCopy: vindex.Owner != "" && !continueAfterCopyWithOwner,
+ TableSettings: []*vtctldatapb.TableMaterializeSettings{{
+ TargetTable: vInfo.targetTableName,
+ SourceExpression: materializeQuery,
+ CreateDdl: createDDL,
+ }},
+ }
+
+ // Update sourceVSchema.
+ sourceVSchema.Vindexes[vInfo.name] = vindex
+ sourceVSchemaTable.ColumnVindexes = append(sourceVSchemaTable.ColumnVindexes, vInfo.sourceTable.ColumnVindexes[0])
+
+ return ms, sourceVSchema, targetVSchema, cancelFunc, nil
+}
+
+// vindexInfo holds the validated vindex configuration.
+type vindexInfo struct {
+ name string
+ targetKeyspace string
+ targetTableName string
+ fromCols []string
+ toCol string
+ ignoreNulls bool
+
+ // sourceTable is the supplied table info.
+ sourceTable *vschemapb.Table
+ sourceTableName string
+}
+
+// validateAndGetVindex validates and extracts vindex configuration.
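+// As a rough illustration (names are hypothetical, not from any real
+// keyspace), a spec of this shape passes validation:
+//
+//	vindexes:
+//	  v: {type: "lookup_unique", params: {table: "targetks.lkp", from: "c1", to: "keyspace_id"}}
+//	tables:
+//	  t1: {columnVindexes: [{column: "c1", name: "v"}]}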
+func (lv *lookupVindex) validateAndGetVindex(specs *vschemapb.Keyspace) (*vschemapb.Vindex, *vindexInfo, error) {
+ if specs == nil {
+ return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no vindex provided")
+ }
+ if len(specs.Vindexes) != 1 {
+ return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "only one vindex must be specified")
+ }
+
+ vindexName := maps.Keys(specs.Vindexes)[0]
+ vindex := maps.Values(specs.Vindexes)[0]
+
+ if !strings.Contains(vindex.Type, "lookup") {
+ return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex %s is not a lookup type", vindex.Type)
+ }
+
+ targetKeyspace, targetTableName, err := lv.parser.ParseTable(vindex.Params["table"])
+ if err != nil || targetKeyspace == "" {
+ return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT,
+ "vindex table name (%s) must be in the form .", vindex.Params["table"])
+ }
+
+ vindexFromCols := strings.Split(vindex.Params["from"], ",")
+ for i, col := range vindexFromCols {
+ vindexFromCols[i] = strings.TrimSpace(col)
+ }
+
+ if strings.Contains(vindex.Type, "unique") {
+ if len(vindexFromCols) != 1 {
+ return nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unique vindex 'from' should have only one column")
+ }
+ }
+
+ vindexToCol := vindex.Params["to"]
+ // Make the vindex write_only. If one exists already in the vschema,
+ // it will need to match this vindex exactly, including the write_only setting.
+ vindex.Params["write_only"] = "true"
+
+ // See if we can create the vindex without errors.
+ if _, err := vindexes.CreateVindex(vindex.Type, vindexName, vindex.Params); err != nil {
+ return nil, nil, err
+ }
+
+ ignoreNulls := false
+ if ignoreNullsStr, ok := vindex.Params["ignore_nulls"]; ok {
+ // This mirrors the behavior of vindexes.boolFromMap().
+ switch ignoreNullsStr {
+ case "true":
+ ignoreNulls = true
+ case "false":
+ ignoreNulls = false
+ default:
+ return nil, nil,
+ vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls (%s) value must be 'true' or 'false'",
+ ignoreNullsStr)
+ }
+ }
+
+ // Validate input table.
+ if len(specs.Tables) < 1 || len(specs.Tables) > 2 {
+ return nil, nil, fmt.Errorf("one or two tables must be specified")
+ }
+
+ return vindex, &vindexInfo{
+ name: vindexName,
+ targetKeyspace: targetKeyspace,
+ targetTableName: targetTableName,
+ fromCols: vindexFromCols,
+ toCol: vindexToCol,
+ ignoreNulls: ignoreNulls,
+ }, nil
+}
+
+func (lv *lookupVindex) getTargetAndSourceVSchema(ctx context.Context, sourceKeyspace string, targetKeyspace string) (sourceVSchema *vschemapb.Keyspace, targetVSchema *vschemapb.Keyspace, err error) {
+ sourceVSchema, err = lv.ts.GetVSchema(ctx, sourceKeyspace)
+ if err != nil {
+ return nil, nil, err
+ }
+ if sourceVSchema.Vindexes == nil {
+ sourceVSchema.Vindexes = make(map[string]*vschemapb.Vindex)
+ }
+ // If source and target keyspaces are the same, make vschemas point
+ // to the same object.
+ if sourceKeyspace == targetKeyspace {
+ targetVSchema = sourceVSchema
+ } else {
+ targetVSchema, err = lv.ts.GetVSchema(ctx, targetKeyspace)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if targetVSchema.Vindexes == nil {
+ targetVSchema.Vindexes = make(map[string]*vschemapb.Vindex)
+ }
+ if targetVSchema.Tables == nil {
+ targetVSchema.Tables = make(map[string]*vschemapb.Table)
+ }
+
+ return sourceVSchema, targetVSchema, nil
+}
+
+func getSourceTable(specs *vschemapb.Keyspace, targetTableName string, fromCols []string) (sourceTable *vschemapb.Table, sourceTableName string, err error) {
+ // Loop executes once or twice.
+ for tableName, table := range specs.Tables {
+ if len(table.ColumnVindexes) != 1 {
+ return nil, "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "exactly one ColumnVindex must be specified for the %s table",
+ tableName)
+ }
+
+ if tableName != targetTableName { // This is the source table.
+ sourceTableName = tableName
+ sourceTable = table
+ continue
+ }
+ // This is a primary vindex definition for the target table
+ // which allows you to override the vindex type used.
+ var vindexCols []string
+ if len(table.ColumnVindexes[0].Columns) != 0 {
+ vindexCols = table.ColumnVindexes[0].Columns
+ } else {
+ if table.ColumnVindexes[0].Column == "" {
+ return nil, "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at least one column must be specified in ColumnVindexes for the %s table",
+ tableName)
+ }
+ vindexCols = []string{table.ColumnVindexes[0].Column}
+ }
+ if !slices.Equal(vindexCols, fromCols) {
+ return nil, "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "columns in the lookup table %s primary vindex (%s) don't match the 'from' columns specified (%s)",
+ tableName, strings.Join(vindexCols, ","), strings.Join(fromCols, ","))
+ }
+ }
+ return sourceTable, sourceTableName, nil
+}
+
+func (lv *lookupVindex) generateCreateDDLStatement(tableSchema *tabletmanagerdatapb.SchemaDefinition, sourceVindexColumns []string, vInfo *vindexInfo, vindex *vschemapb.Vindex) (string, error) {
+ lines := strings.Split(tableSchema.TableDefinitions[0].Schema, "\n")
+ if len(lines) < 3 {
+ // Should never happen.
+ return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "schema looks incorrect: %s, expecting at least four lines",
+ tableSchema.TableDefinitions[0].Schema)
+ }
+
+ var modified []string
+ modified = append(modified, strings.Replace(lines[0], vInfo.sourceTableName, vInfo.targetTableName, 1))
+ for i := range sourceVindexColumns {
+ line, err := generateColDef(lines, sourceVindexColumns[i], vInfo.fromCols[i])
+ if err != nil {
+ return "", err
+ }
+ modified = append(modified, line)
+ }
+
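+ // consistent_lookup vindexes store the keyspace_id in the TO column, so
+ // that column stays a varbinary regardless of any requested data_type.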
+ if vindex.Params["data_type"] == "" || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") {
+ modified = append(modified, fmt.Sprintf(" %s varbinary(128),", sqlescape.EscapeID(vInfo.toCol)))
+ } else {
+ modified = append(modified, fmt.Sprintf(" %s %s,", sqlescape.EscapeID(vInfo.toCol), sqlescape.EscapeID(vindex.Params["data_type"])))
+ }
+
+ buf := sqlparser.NewTrackedBuffer(nil)
+ fmt.Fprintf(buf, " PRIMARY KEY (")
+ prefix := ""
+ for _, col := range vInfo.fromCols {
+ fmt.Fprintf(buf, "%s%s", prefix, sqlescape.EscapeID(col))
+ prefix = ", "
+ }
+ fmt.Fprintf(buf, ")")
+
+ modified = append(modified, buf.String())
+ modified = append(modified, ")")
+ createDDL := strings.Join(modified, "\n")
+
+ // Confirm that our DDL is valid before we create anything.
+ if _, err := lv.parser.ParseStrictDDL(createDDL); err != nil {
+ return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error: %v; invalid lookup table definition generated: %s",
+ err, createDDL)
+ }
+
+ return createDDL, nil
+}
+
+func generateMaterializeQuery(vInfo *vindexInfo, vindex *vschemapb.Vindex, sourceVindexColumns []string) string {
+ buf := sqlparser.NewTrackedBuffer(nil)
+ buf.Myprintf("select ")
+ for i := range vInfo.fromCols {
+ buf.Myprintf("%s as %s, ", sqlparser.String(sqlparser.NewIdentifierCI(sourceVindexColumns[i])), sqlparser.String(sqlparser.NewIdentifierCI(vInfo.fromCols[i])))
+ }
+ if strings.EqualFold(vInfo.toCol, "keyspace_id") || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") {
+ buf.Myprintf("keyspace_id() as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vInfo.toCol)))
+ } else {
+ buf.Myprintf("%s as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vInfo.toCol)), sqlparser.String(sqlparser.NewIdentifierCI(vInfo.toCol)))
+ }
+ buf.Myprintf("from %s", sqlparser.String(sqlparser.NewIdentifierCS(vInfo.sourceTableName)))
+ if vInfo.ignoreNulls {
+ buf.Myprintf(" where ")
+ lastValIdx := len(vInfo.fromCols) - 1
+ for i := range vInfo.fromCols {
+ buf.Myprintf("%s is not null", sqlparser.String(sqlparser.NewIdentifierCI(vInfo.fromCols[i])))
+ if i != lastValIdx {
+ buf.Myprintf(" and ")
+ }
+ }
+ }
+ if vindex.Owner != "" {
+ // Only backfill.
+ buf.Myprintf(" group by ")
+ for i := range vInfo.fromCols {
+ buf.Myprintf("%s, ", sqlparser.String(sqlparser.NewIdentifierCI(vInfo.fromCols[i])))
+ }
+ buf.Myprintf("%s", sqlparser.String(sqlparser.NewIdentifierCI(vInfo.toCol)))
+ }
+ return buf.String()
+}
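+// As an illustration (hypothetical owned vindex with source column c1,
+// from-column c1, and to-column keyspace_id on table t1), the generated
+// backfill query takes the form:
+//
+//	select c1 as c1, keyspace_id() as keyspace_id from t1 group by c1, keyspace_id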
+
+// validateSourceTableAndGetVindexColumns validates input table and vindex consistency, and returns sourceVindexColumns.
+func validateSourceTableAndGetVindexColumns(vInfo *vindexInfo, vindex *vschemapb.Vindex, keyspace string) (sourceVindexColumns []string, err error) {
+ if vInfo.sourceTable == nil || len(vInfo.sourceTable.ColumnVindexes) != 1 {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No ColumnVindex found for the owner table (%s) in the %s keyspace",
+ vInfo.sourceTable, keyspace)
+ }
+ if vInfo.sourceTable.ColumnVindexes[0].Name != vInfo.name {
+ return nil,
+ vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ColumnVindex name (%s) must match vindex name (%s)",
+ vInfo.sourceTable.ColumnVindexes[0].Name, vInfo.name)
+ }
+ if vindex.Owner != "" && vindex.Owner != vInfo.sourceTableName {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex owner (%s) must match table name (%s)",
+ vindex.Owner, vInfo.sourceTableName)
+ }
+ if len(vInfo.sourceTable.ColumnVindexes[0].Columns) != 0 {
+ sourceVindexColumns = vInfo.sourceTable.ColumnVindexes[0].Columns
+ } else {
+ if vInfo.sourceTable.ColumnVindexes[0].Column == "" {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at least one column must be specified in ColumnVindexes for the %s table",
+ vInfo.sourceTableName)
+ }
+ sourceVindexColumns = []string{vInfo.sourceTable.ColumnVindexes[0].Column}
+ }
+ if len(sourceVindexColumns) != len(vInfo.fromCols) {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "length of table columns (%d) differs from length of vindex columns (%d)",
+ len(sourceVindexColumns), len(vInfo.fromCols))
+ }
+
+ return sourceVindexColumns, nil
+}
+
+func validateNonConflictingColumnVindex(sourceVSchemaTable *vschemapb.Table, vInfo *vindexInfo, sourceVindexColumns []string, keyspace string) error {
+ for _, colVindex := range sourceVSchemaTable.ColumnVindexes {
+ // For a conflict, the vindex name and column should match.
+ if colVindex.Name != vInfo.name {
+ continue
+ }
+ var colNames []string
+ if len(colVindex.Columns) == 0 {
+ colNames = []string{colVindex.Column}
+ } else {
+ colNames = colVindex.Columns
+ }
+ // If this is the exact same definition then we can use the existing one. If they
+ // are not the same then they are two distinct conflicting vindexes and we should
+ // not proceed.
+ if !slices.Equal(colNames, sourceVindexColumns) {
+ return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting ColumnVindex on column(s) %s in table %s already exists in the %s keyspace",
+ strings.Join(colNames, ","), vInfo.sourceTableName, keyspace)
+ }
+ }
+ return nil
+}
+
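+// For example (hypothetical column names), given the source schema line
+// "  `col2` int(11) DEFAULT NULL," and from-column c1, generateColDef
+// returns "  `c1` int(11),".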
+func generateColDef(lines []string, sourceVindexCol, vindexFromCol string) (string, error) {
+ source := sqlescape.EscapeID(sourceVindexCol)
+ target := sqlescape.EscapeID(vindexFromCol)
+
+ for _, line := range lines[1:] {
+ if strings.Contains(line, source) {
+ line = strings.Replace(line, source, target, 1)
+ line = strings.Replace(line, " AUTO_INCREMENT", "", 1)
+ line = strings.Replace(line, " DEFAULT NULL", "", 1)
+ // Ensure that the column definition ends with a comma as we will
+ // be appending the TO column and PRIMARY KEY definitions. If the
+ // source column here was the last entity defined in the source
+ // table's definition then it will not already have the comma.
+ if !strings.HasSuffix(strings.TrimSpace(line), ",") {
+ line += ","
+ }
+ return line, nil
+ }
+ }
+ return "", fmt.Errorf("column %s not found in schema %v", sourceVindexCol, lines)
+}
diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go
index a583a101186..e430f740c1f 100644
--- a/go/vt/vtctl/workflow/materializer_test.go
+++ b/go/vt/vtctl/workflow/materializer_test.go
@@ -1515,7 +1515,8 @@ func TestCreateLookupVindexCreateDDL(t *testing.T) {
setStartingVschema()
}()
}
- outms, _, _, cancelFunc, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.SourceKeyspace, tcase.specs, false)
+ lv := newLookupVindex(env.ws)
+ outms, _, _, cancelFunc, err := lv.prepareCreate(ctx, "workflow", ms.SourceKeyspace, tcase.specs, false)
if tcase.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tcase.err, "prepareCreateLookup(%s) err: %v, does not contain %v", tcase.description, err, tcase.err)
@@ -1763,7 +1764,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) {
t.Fatal(err)
}
- _, got, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.SourceKeyspace, specs, false)
+ lv := newLookupVindex(env.ws)
+ _, got, _, _, err := lv.prepareCreate(ctx, "workflow", ms.SourceKeyspace, specs, false)
require.NoError(t, err)
if !proto.Equal(got, tcase.out) {
t.Errorf("%s: got:\n%v, want\n%v", tcase.description, got, tcase.out)
@@ -1984,32 +1986,35 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) {
err: "type SET is not recommended for a vindex",
}}
for _, tcase := range testcases {
- env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{
- TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{
- Fields: []*querypb.Field{{
- Name: "col2",
- Type: tcase.sourceFieldType,
+ t.Run(tcase.description, func(t *testing.T) {
+ env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{
+ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{
+ Fields: []*querypb.Field{{
+ Name: "col2",
+ Type: tcase.sourceFieldType,
+ }},
+ Schema: sourceSchema,
}},
- Schema: sourceSchema,
- }},
- }
- specs.Vindexes["v"].Params["table"] = fmt.Sprintf("%s.%s", ms.TargetKeyspace, tcase.targetTable)
- if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, tcase.targetVSchema); err != nil {
- t.Fatal(err)
- }
+ }
+ specs.Vindexes["v"].Params["table"] = fmt.Sprintf("%s.%s", ms.TargetKeyspace, tcase.targetTable)
+ if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, tcase.targetVSchema); err != nil {
+ t.Fatal(err)
+ }
- _, _, got, cancelFunc, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.SourceKeyspace, specs, false)
- if tcase.err != "" {
- if err == nil || !strings.Contains(err.Error(), tcase.err) {
- t.Errorf("prepareCreateLookup(%s) err: %v, must contain %v", tcase.description, err, tcase.err)
+ lv := newLookupVindex(env.ws)
+ _, _, got, cancelFunc, err := lv.prepareCreate(ctx, "workflow", ms.SourceKeyspace, specs, false)
+ if tcase.err != "" {
+ if err == nil || !strings.Contains(err.Error(), tcase.err) {
+ t.Errorf("prepareCreateLookup(%s) err: %v, must contain %v", tcase.description, err, tcase.err)
+ }
+ return
}
- continue
- }
- require.NoError(t, err)
- // withTable is a vschema that already contains the table and thus
- // we don't make any vschema changes and there's nothing to cancel.
- require.True(t, (cancelFunc != nil) == (tcase.targetVSchema != withTable))
- utils.MustMatch(t, tcase.out, got, tcase.description)
+ require.NoError(t, err)
+ // withTable is a vschema that already contains the table and thus
+ // we don't make any vschema changes and there's nothing to cancel.
+ require.True(t, (cancelFunc != nil) == (tcase.targetVSchema != withTable))
+ utils.MustMatch(t, tcase.out, got, tcase.description)
+ })
}
}
@@ -2119,7 +2124,8 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) {
t.Fatal(err)
}
- _, got, _, _, err := env.ws.prepareCreateLookup(ctx, "keyspace", ms.TargetKeyspace, specs, false)
+ lv := newLookupVindex(env.ws)
+ _, got, _, _, err := lv.prepareCreate(ctx, "keyspace", ms.TargetKeyspace, specs, false)
require.NoError(t, err)
if !proto.Equal(got, want) {
t.Errorf("same keyspace: got:\n%v, want\n%v", got, want)
@@ -2245,7 +2251,8 @@ func TestCreateCustomizedVindex(t *testing.T) {
t.Fatal(err)
}
- _, got, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, false)
+ lv := newLookupVindex(env.ws)
+ _, got, _, _, err := lv.prepareCreate(ctx, "workflow", ms.TargetKeyspace, specs, false)
require.NoError(t, err)
if !proto.Equal(got, want) {
t.Errorf("customize create lookup error same: got:\n%v, want\n%v", got, want)
@@ -2363,7 +2370,8 @@ func TestCreateLookupVindexIgnoreNulls(t *testing.T) {
t.Fatal(err)
}
- ms, ks, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, false)
+ lv := newLookupVindex(env.ws)
+ ms, ks, _, _, err := lv.prepareCreate(ctx, "workflow", ms.TargetKeyspace, specs, false)
require.NoError(t, err)
if !proto.Equal(wantKs, ks) {
t.Errorf("unexpected keyspace value: got:\n%v, want\n%v", ks, wantKs)
@@ -2443,11 +2451,12 @@ func TestStopAfterCopyFlag(t *testing.T) {
t.Fatal(err)
}
- ms1, _, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, false)
+ lv := newLookupVindex(env.ws)
+ ms1, _, _, _, err := lv.prepareCreate(ctx, "workflow", ms.TargetKeyspace, specs, false)
require.NoError(t, err)
require.Equal(t, ms1.StopAfterCopy, true)
- ms2, _, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, true)
+ ms2, _, _, _, err := lv.prepareCreate(ctx, "workflow", ms.TargetKeyspace, specs, true)
require.NoError(t, err)
require.Equal(t, ms2.StopAfterCopy, false)
}
diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go
index baea602b7a4..8123416eb41 100644
--- a/go/vt/vtctl/workflow/server.go
+++ b/go/vt/vtctl/workflow/server.go
@@ -29,18 +29,15 @@ import (
"time"
"github.com/google/uuid"
- "golang.org/x/exp/maps"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/proto"
"vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/protoutil"
"vitess.io/vitess/go/ptr"
- "vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/concurrency"
@@ -49,7 +46,6 @@ import (
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl/tmutils"
- "vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -571,7 +567,9 @@ func (s *Server) LookupVindexCreate(ctx context.Context, req *vtctldatapb.Lookup
span.Annotate("cells", req.Cells)
span.Annotate("tablet_types", req.TabletTypes)
- ms, sourceVSchema, targetVSchema, cancelFunc, err := s.prepareCreateLookup(ctx, req.Workflow, req.Keyspace, req.Vindex, req.ContinueAfterCopyWithOwner)
+ lv := newLookupVindex(s)
+
+ ms, sourceVSchema, targetVSchema, cancelFunc, err := lv.prepareCreate(ctx, req.Workflow, req.Keyspace, req.Vindex, req.ContinueAfterCopyWithOwner)
if err != nil {
return nil, err
}
@@ -715,11 +713,8 @@ func (s *Server) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSet
cells[i] = strings.TrimSpace(cells[i])
}
- switch {
- case len(ms.ReferenceTables) == 0 && len(ms.TableSettings) == 0:
- return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "either --table-settings or --reference-tables must be specified")
- case len(ms.ReferenceTables) > 0 && len(ms.TableSettings) > 0:
- return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot specify both --table-settings and --reference-tables")
+ if err := validateMaterializeSettings(ms); err != nil {
+ return err
}
for _, table := range ms.ReferenceTables {
@@ -746,6 +741,17 @@ func (s *Server) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSet
return mz.startStreams(ctx)
}
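+
+// validateMaterializeSettings ensures that exactly one of --table-settings
+// or --reference-tables is specified.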
+func validateMaterializeSettings(ms *vtctldatapb.MaterializeSettings) error {
+ switch {
+ case len(ms.ReferenceTables) == 0 && len(ms.TableSettings) == 0:
+ return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "either --table-settings or --reference-tables must be specified")
+ case len(ms.ReferenceTables) > 0 && len(ms.TableSettings) > 0:
+ return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot specify both --table-settings and --reference-tables")
+ }
+
+ return nil
+}
+
// MoveTablesCreate is part of the vtctlservicepb.VtctldServer interface.
// It passes the embedded TabletRequest object to the given keyspace's
// target primary tablets that will be executing the workflow.
@@ -3408,413 +3414,6 @@ func fillStringTemplate(tmpl string, vars any) (string, error) {
return data.String(), nil
}
-// prepareCreateLookup performs the preparatory steps for creating a
-// Lookup Vindex.
-func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace string, specs *vschemapb.Keyspace, continueAfterCopyWithOwner bool) (
- ms *vtctldatapb.MaterializeSettings, sourceVSchema, targetVSchema *vschemapb.Keyspace, cancelFunc func() error, err error) {
- // Important variables are pulled out here.
- var (
- vindexName string
- vindex *vschemapb.Vindex
- targetKeyspace string
- targetTableName string
- vindexFromCols []string
- vindexToCol string
- vindexIgnoreNulls bool
-
- sourceTableName string
- // sourceTable is the supplied table info.
- sourceTable *vschemapb.Table
- // sourceVSchemaTable is the table info present in the vschema.
- sourceVSchemaTable *vschemapb.Table
- // sourceVindexColumns are computed from the input sourceTable.
- sourceVindexColumns []string
-
- // Target table info.
- createDDL string
- materializeQuery string
- )
-
- // Validate input vindex.
- if specs == nil {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no vindex provided")
- }
- if len(specs.Vindexes) != 1 {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "only one vindex must be specified")
- }
- vindexName = maps.Keys(specs.Vindexes)[0]
- vindex = maps.Values(specs.Vindexes)[0]
- if !strings.Contains(vindex.Type, "lookup") {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex %s is not a lookup type", vindex.Type)
- }
- targetKeyspace, targetTableName, err = s.env.Parser().ParseTable(vindex.Params["table"])
- if err != nil || targetKeyspace == "" {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex table name (%s) must be in the form .", vindex.Params["table"])
- }
- vindexFromCols = strings.Split(vindex.Params["from"], ",")
- for i, col := range vindexFromCols {
- vindexFromCols[i] = strings.TrimSpace(col)
- }
- if strings.Contains(vindex.Type, "unique") {
- if len(vindexFromCols) != 1 {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unique vindex 'from' should have only one column")
- }
- }
- vindexToCol = vindex.Params["to"]
- // Make the vindex write_only. If one exists already in the vschema,
- // it will need to match this vindex exactly, including the write_only setting.
- vindex.Params["write_only"] = "true"
- // See if we can create the vindex without errors.
- if _, err := vindexes.CreateVindex(vindex.Type, vindexName, vindex.Params); err != nil {
- return nil, nil, nil, nil, err
- }
- if ignoreNullsStr, ok := vindex.Params["ignore_nulls"]; ok {
- // This mirrors the behavior of vindexes.boolFromMap().
- switch ignoreNullsStr {
- case "true":
- vindexIgnoreNulls = true
- case "false":
- vindexIgnoreNulls = false
- default:
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls (%s) value must be 'true' or 'false'",
- ignoreNullsStr)
- }
- }
-
- // Validate input table.
- if len(specs.Tables) < 1 || len(specs.Tables) > 2 {
- return nil, nil, nil, nil, fmt.Errorf("one or two tables must be specified")
- }
- // Loop executes once or twice.
- for tableName, table := range specs.Tables {
- if len(table.ColumnVindexes) != 1 {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "exactly one ColumnVindex must be specified for the %s table",
- tableName)
- }
- if tableName != targetTableName { // This is the source table.
- sourceTableName = tableName
- sourceTable = table
- continue
- }
- // This is a primary vindex definition for the target table
- // which allows you to override the vindex type used.
- var vindexCols []string
- if len(table.ColumnVindexes[0].Columns) != 0 {
- vindexCols = table.ColumnVindexes[0].Columns
- } else {
- if table.ColumnVindexes[0].Column == "" {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at least one column must be specified in ColumnVindexes for the %s table",
- tableName)
- }
- vindexCols = []string{table.ColumnVindexes[0].Column}
- }
- if !slices.Equal(vindexCols, vindexFromCols) {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "columns in the lookup table %s primary vindex (%s) don't match the 'from' columns specified (%s)",
- tableName, strings.Join(vindexCols, ","), strings.Join(vindexFromCols, ","))
- }
- }
-
- // Validate input table and vindex consistency.
- if sourceTable == nil || len(sourceTable.ColumnVindexes) != 1 {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No ColumnVindex found for the owner table (%s) in the %s keyspace",
- sourceTable, keyspace)
- }
- if sourceTable.ColumnVindexes[0].Name != vindexName {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ColumnVindex name (%s) must match vindex name (%s)",
- sourceTable.ColumnVindexes[0].Name, vindexName)
- }
- if vindex.Owner != "" && vindex.Owner != sourceTableName {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex owner (%s) must match table name (%s)",
- vindex.Owner, sourceTableName)
- }
- if len(sourceTable.ColumnVindexes[0].Columns) != 0 {
- sourceVindexColumns = sourceTable.ColumnVindexes[0].Columns
- } else {
- if sourceTable.ColumnVindexes[0].Column == "" {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at least one column must be specified in ColumnVindexes for the %s table",
- sourceTableName)
- }
- sourceVindexColumns = []string{sourceTable.ColumnVindexes[0].Column}
- }
- if len(sourceVindexColumns) != len(vindexFromCols) {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "length of table columns (%d) differs from length of vindex columns (%d)",
- len(sourceVindexColumns), len(vindexFromCols))
- }
-
- // Validate against source vschema.
- sourceVSchema, err = s.ts.GetVSchema(ctx, keyspace)
- if err != nil {
- return nil, nil, nil, nil, err
- }
- if sourceVSchema.Vindexes == nil {
- sourceVSchema.Vindexes = make(map[string]*vschemapb.Vindex)
- }
- // If source and target keyspaces are the same, make vschemas point
- // to the same object.
- if keyspace == targetKeyspace {
- targetVSchema = sourceVSchema
- } else {
- targetVSchema, err = s.ts.GetVSchema(ctx, targetKeyspace)
- if err != nil {
- return nil, nil, nil, nil, err
- }
- }
- if targetVSchema.Vindexes == nil {
- targetVSchema.Vindexes = make(map[string]*vschemapb.Vindex)
- }
- if targetVSchema.Tables == nil {
- targetVSchema.Tables = make(map[string]*vschemapb.Table)
- }
- if existing, ok := sourceVSchema.Vindexes[vindexName]; ok {
- if !proto.Equal(existing, vindex) { // If the exact same vindex already exists then we can re-use it
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INTERNAL, "a conflicting vindex named %s already exists in the %s keyspace",
- vindexName, keyspace)
- }
- }
- sourceVSchemaTable = sourceVSchema.Tables[sourceTableName]
- if sourceVSchemaTable == nil && !schema.IsInternalOperationTableName(sourceTableName) {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %s not found in the %s keyspace", sourceTableName, keyspace)
- }
- for _, colVindex := range sourceVSchemaTable.ColumnVindexes {
- // For a conflict, the vindex name and column should match.
- if colVindex.Name != vindexName {
- continue
- }
- var colNames []string
- if len(colVindex.Columns) == 0 {
- colNames = []string{colVindex.Column}
- } else {
- colNames = colVindex.Columns
- }
- // If this is the exact same definition then we can use the existing one. If they
- // are not the same then they are two distinct conflicting vindexes and we should
- // not proceed.
- if !slices.Equal(colNames, sourceVindexColumns) {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting ColumnVindex on column(s) %s in table %s already exists in the %s keyspace",
- strings.Join(colNames, ","), sourceTableName, keyspace)
- }
- }
-
- // Validate against source schema.
- sourceShards, err := s.ts.GetServingShards(ctx, keyspace)
- if err != nil {
- return nil, nil, nil, nil, err
- }
- onesource := sourceShards[0]
- if onesource.PrimaryAlias == nil {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INTERNAL, "source shard %s has no primary", onesource.ShardName())
- }
- req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{sourceTableName}}
- tableSchema, err := schematools.GetSchema(ctx, s.ts, s.tmc, onesource.PrimaryAlias, req)
- if err != nil {
- return nil, nil, nil, nil, err
- }
- if len(tableSchema.TableDefinitions) != 1 {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of tables (%d) returned from %s schema",
- len(tableSchema.TableDefinitions), keyspace)
- }
-
- // Generate "create table" statement.
- lines := strings.Split(tableSchema.TableDefinitions[0].Schema, "\n")
- if len(lines) < 3 {
- // Should never happen.
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INTERNAL, "schema looks incorrect: %s, expecting at least four lines",
- tableSchema.TableDefinitions[0].Schema)
- }
- var modified []string
- modified = append(modified, strings.Replace(lines[0], sourceTableName, targetTableName, 1))
- for i := range sourceVindexColumns {
- line, err := generateColDef(lines, sourceVindexColumns[i], vindexFromCols[i])
- if err != nil {
- return nil, nil, nil, nil, err
- }
- modified = append(modified, line)
- }
-
- if vindex.Params["data_type"] == "" || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") {
- modified = append(modified, fmt.Sprintf(" %s varbinary(128),", sqlescape.EscapeID(vindexToCol)))
- } else {
- modified = append(modified, fmt.Sprintf(" %s %s,", sqlescape.EscapeID(vindexToCol), sqlescape.EscapeID(vindex.Params["data_type"])))
- }
- buf := sqlparser.NewTrackedBuffer(nil)
- fmt.Fprintf(buf, " PRIMARY KEY (")
- prefix := ""
- for _, col := range vindexFromCols {
- fmt.Fprintf(buf, "%s%s", prefix, sqlescape.EscapeID(col))
- prefix = ", "
- }
- fmt.Fprintf(buf, ")")
- modified = append(modified, buf.String())
- modified = append(modified, ")")
- createDDL = strings.Join(modified, "\n")
- // Confirm that our DDL is valid before we create anything.
- if _, err = s.env.Parser().ParseStrictDDL(createDDL); err != nil {
- return nil, nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error: %v; invalid lookup table definition generated: %s",
- err, createDDL)
- }
-
- // Generate vreplication query.
- buf = sqlparser.NewTrackedBuffer(nil)
- buf.Myprintf("select ")
- for i := range vindexFromCols {
- buf.Myprintf("%s as %s, ", sqlparser.String(sqlparser.NewIdentifierCI(sourceVindexColumns[i])), sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i])))
- }
- if strings.EqualFold(vindexToCol, "keyspace_id") || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") {
- buf.Myprintf("keyspace_id() as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)))
- } else {
- buf.Myprintf("%s as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)), sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)))
- }
- buf.Myprintf("from %s", sqlparser.String(sqlparser.NewIdentifierCS(sourceTableName)))
- if vindexIgnoreNulls {
- buf.Myprintf(" where ")
- lastValIdx := len(vindexFromCols) - 1
- for i := range vindexFromCols {
- buf.Myprintf("%s is not null", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i])))
- if i != lastValIdx {
- buf.Myprintf(" and ")
- }
- }
- }
- if vindex.Owner != "" {
- // Only backfill.
- buf.Myprintf(" group by ")
- for i := range vindexFromCols {
- buf.Myprintf("%s, ", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i])))
- }
- buf.Myprintf("%s", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)))
- }
- materializeQuery = buf.String()
-
- // Save a copy of the original vschema if we modify it and need to provide
- // a cancelFunc.
- ogTargetVSchema := targetVSchema.CloneVT()
- targetChanged := false
-
- // Update targetVSchema.
- targetTable := specs.Tables[targetTableName]
- if targetVSchema.Sharded {
- // Choose a primary vindex type for the lookup table based on the source
- // definition if one was not explicitly specified.
- var targetVindexType string
- var targetVindex *vschemapb.Vindex
- for _, field := range tableSchema.TableDefinitions[0].Fields {
- if sourceVindexColumns[0] == field.Name {
- if targetTable != nil && len(targetTable.ColumnVindexes) > 0 {
- targetVindexType = targetTable.ColumnVindexes[0].Name
- }
- if targetVindexType == "" {
- targetVindexType, err = vindexes.ChooseVindexForType(field.Type)
- if err != nil {
- return nil, nil, nil, nil, err
- }
- }
- targetVindex = &vschemapb.Vindex{
- Type: targetVindexType,
- }
- break
- }
- }
- if targetVindex == nil {
- // Unreachable. We validated column names when generating the DDL.
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INTERNAL, "column %s not found in target schema %s",
- sourceVindexColumns[0], tableSchema.TableDefinitions[0].Schema)
- }
- if existing, ok := targetVSchema.Vindexes[targetVindexType]; ok {
- if !proto.Equal(existing, targetVindex) {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting vindex named %v already exists in the %s keyspace",
- targetVindexType, targetKeyspace)
- }
- } else {
- targetVSchema.Vindexes[targetVindexType] = targetVindex
- targetChanged = true
- }
-
- targetTable = &vschemapb.Table{
- ColumnVindexes: []*vschemapb.ColumnVindex{{
- Column: vindexFromCols[0],
- Name: targetVindexType,
- }},
- }
- } else {
- targetTable = &vschemapb.Table{}
- }
- if existing, ok := targetVSchema.Tables[targetTableName]; ok {
- if !proto.Equal(existing, targetTable) {
- return nil, nil, nil, nil,
- vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting table named %s already exists in the %s vschema",
- targetTableName, targetKeyspace)
- }
- } else {
- targetVSchema.Tables[targetTableName] = targetTable
- targetChanged = true
- }
-
- if targetChanged {
- cancelFunc = func() error {
- // Restore the original target vschema.
- return s.ts.SaveVSchema(ctx, targetKeyspace, ogTargetVSchema)
- }
- }
-
- ms = &vtctldatapb.MaterializeSettings{
- Workflow: workflow,
- MaterializationIntent: vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX,
- SourceKeyspace: keyspace,
- TargetKeyspace: targetKeyspace,
- StopAfterCopy: vindex.Owner != "" && !continueAfterCopyWithOwner,
- TableSettings: []*vtctldatapb.TableMaterializeSettings{{
- TargetTable: targetTableName,
- SourceExpression: materializeQuery,
- CreateDdl: createDDL,
- }},
- }
-
- // Update sourceVSchema
- sourceVSchema.Vindexes[vindexName] = vindex
- sourceVSchemaTable.ColumnVindexes = append(sourceVSchemaTable.ColumnVindexes, sourceTable.ColumnVindexes[0])
-
- return ms, sourceVSchema, targetVSchema, cancelFunc, nil
-}
-
-func generateColDef(lines []string, sourceVindexCol, vindexFromCol string) (string, error) {
- source := sqlescape.EscapeID(sourceVindexCol)
- target := sqlescape.EscapeID(vindexFromCol)
-
- for _, line := range lines[1:] {
- if strings.Contains(line, source) {
- line = strings.Replace(line, source, target, 1)
- line = strings.Replace(line, " AUTO_INCREMENT", "", 1)
- line = strings.Replace(line, " DEFAULT NULL", "", 1)
- // Ensure that the column definition ends with a comma as we will
- // be appending the TO column and PRIMARY KEY definitions. If the
- // source column here was the last entity defined in the source
- // table's definition then it will not already have the comma.
- if !strings.HasSuffix(strings.TrimSpace(line), ",") {
- line += ","
- }
- return line, nil
- }
- }
- return "", fmt.Errorf("column %s not found in schema %v", sourceVindexCol, lines)
-}
-
func (s *Server) MigrateCreate(ctx context.Context, req *vtctldatapb.MigrateCreateRequest) (*vtctldatapb.WorkflowStatusResponse, error) {
moveTablesCreateRequest := &vtctldatapb.MoveTablesCreateRequest{
Workflow: req.Workflow,
diff --git a/go/vt/vtctl/workflow/vdiff.go b/go/vt/vtctl/workflow/vdiff.go
new file mode 100644
index 00000000000..6be5fe3c3b5
--- /dev/null
+++ b/go/vt/vtctl/workflow/vdiff.go
@@ -0,0 +1,280 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "encoding/json"
+ "math"
+ "sort"
+ "strings"
+ "time"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+// TableSummary aggregates the current state of the table diff from all shards.
+type TableSummary struct {
+ TableName string
+ State vdiff.VDiffState
+ RowsCompared int64
+ MatchingRows int64
+ MismatchedRows int64
+ ExtraRowsSource int64
+ ExtraRowsTarget int64
+ LastUpdated string `json:"LastUpdated,omitempty"`
+}
+
+// Summary aggregates the current state of the vdiff from all shards.
+type Summary struct {
+ Workflow, Keyspace string
+ State vdiff.VDiffState
+ UUID string
+ RowsCompared int64
+ HasMismatch bool
+ Shards string
+ StartedAt string `json:"StartedAt,omitempty"`
+ CompletedAt string `json:"CompletedAt,omitempty"`
+ TableSummaryMap map[string]TableSummary `json:"TableSummary,omitempty"`
+ // This is keyed by table name and then by shard name.
+ Reports map[string]map[string]vdiff.DiffReport `json:"Reports,omitempty"`
+ // This is keyed by shard name.
+ Errors map[string]string `json:"Errors,omitempty"`
+ Progress *vdiff.ProgressReport `json:"Progress,omitempty"`
+}
+
+// BuildSummary generates a summary from a vdiff show response.
+func BuildSummary(keyspace, workflow, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (*Summary, error) {
+ summary := &Summary{
+ Workflow: workflow,
+ Keyspace: keyspace,
+ UUID: uuid,
+ State: vdiff.UnknownState,
+ RowsCompared: 0,
+ StartedAt: "",
+ CompletedAt: "",
+ HasMismatch: false,
+ Shards: "",
+ Reports: make(map[string]map[string]vdiff.DiffReport),
+ Errors: make(map[string]string),
+ Progress: nil,
+ }
+
+ var tableSummaryMap map[string]TableSummary
+ var reports map[string]map[string]vdiff.DiffReport
+ // Keep a tally of the states across all tables in all shards.
+ tableStateCounts := map[vdiff.VDiffState]int{
+ vdiff.UnknownState: 0,
+ vdiff.PendingState: 0,
+ vdiff.StartedState: 0,
+ vdiff.StoppedState: 0,
+ vdiff.ErrorState: 0,
+ vdiff.CompletedState: 0,
+ }
+ // Keep a tally of the summary states across all shards.
+ shardStateCounts := map[vdiff.VDiffState]int{
+ vdiff.UnknownState: 0,
+ vdiff.PendingState: 0,
+ vdiff.StartedState: 0,
+ vdiff.StoppedState: 0,
+ vdiff.ErrorState: 0,
+ vdiff.CompletedState: 0,
+ }
+ // Keep a tally of the approximate total rows to process as we'll use this for our progress
+ // report.
+ totalRowsToCompare := int64(0)
+ var shards []string
+ for shard, resp := range resp.TabletResponses {
+ first := true
+ if resp != nil && resp.Output != nil {
+ shards = append(shards, shard)
+ qr := sqltypes.Proto3ToResult(resp.Output)
+ if tableSummaryMap == nil {
+ tableSummaryMap = make(map[string]TableSummary, 0)
+ reports = make(map[string]map[string]vdiff.DiffReport, 0)
+ }
+ for _, row := range qr.Named().Rows {
+ // Update the global VDiff summary based on the per shard level summary.
+ // Since these values will be the same for all subsequent rows we only use
+ // the first row.
+ if first {
+ first = false
+ // Our timestamps are strings in `2022-06-26 20:43:25` format so we sort
+ // them lexicographically.
+ // We should use the earliest started_at across all shards.
+ if sa := row.AsString("started_at", ""); summary.StartedAt == "" || sa < summary.StartedAt {
+ summary.StartedAt = sa
+ }
+ // And we should use the latest completed_at across all shards.
+ if ca := row.AsString("completed_at", ""); summary.CompletedAt == "" || ca > summary.CompletedAt {
+ summary.CompletedAt = ca
+ }
+ // If we had an error on the shard, then let's add that to the summary.
+ if le := row.AsString("last_error", ""); le != "" {
+ summary.Errors[shard] = le
+ }
+ // Keep track of how many shards are marked as a specific state. We check
+ // this combined with the shard.table states to determine the VDiff summary
+ // state.
+ shardStateCounts[vdiff.VDiffState(strings.ToLower(row.AsString("vdiff_state", "")))]++
+ }
+
+ // Global VDiff summary updates that take into account the per table details
+ // per shard.
+ {
+ summary.RowsCompared += row.AsInt64("rows_compared", 0)
+ totalRowsToCompare += row.AsInt64("table_rows", 0)
+
+ // If we had a mismatch on any table on any shard then the global VDiff
+ // summary does too.
+ if mm, _ := row.ToBool("has_mismatch"); mm {
+ summary.HasMismatch = true
+ }
+ }
+
+ // Table summary information that must be accounted for across all shards.
+ {
+ table := row.AsString("table_name", "")
+ if table == "" { // This occurs when the table diff has not started on 1 or more shards
+ continue
+ }
+ // Create the global VDiff table summary object if it doesn't exist.
+ if _, ok := tableSummaryMap[table]; !ok {
+ tableSummaryMap[table] = TableSummary{
+ TableName: table,
+ State: vdiff.UnknownState,
+ }
+ }
+ ts := tableSummaryMap[table]
+ // This is the shard level VDiff table state.
+ sts := vdiff.VDiffState(strings.ToLower(row.AsString("table_state", "")))
+ tableStateCounts[sts]++
+
+ // The error state must be sticky, and we should not override any other
+ // known state with completed.
+ switch sts {
+ case vdiff.CompletedState:
+ if ts.State == vdiff.UnknownState {
+ ts.State = sts
+ }
+ case vdiff.ErrorState:
+ ts.State = sts
+ default:
+ if ts.State != vdiff.ErrorState {
+ ts.State = sts
+ }
+ }
+
+ diffReport := row.AsString("report", "")
+ dr := vdiff.DiffReport{}
+ if diffReport != "" {
+ err := json.Unmarshal([]byte(diffReport), &dr)
+ if err != nil {
+ return nil, err
+ }
+ ts.RowsCompared += dr.ProcessedRows
+ ts.MismatchedRows += dr.MismatchedRows
+ ts.MatchingRows += dr.MatchingRows
+ ts.ExtraRowsTarget += dr.ExtraRowsTarget
+ ts.ExtraRowsSource += dr.ExtraRowsSource
+ }
+ if _, ok := reports[table]; !ok {
+ reports[table] = make(map[string]vdiff.DiffReport)
+ }
+
+ reports[table][shard] = dr
+ tableSummaryMap[table] = ts
+ }
+ }
+ }
+ }
+
+ // The global VDiff summary should progress from pending->started->completed with
+ // stopped for any shard and error for any table being sticky for the global summary.
+ // We should only consider the VDiff to be complete if it's completed for every table
+ // on every shard.
+ if shardStateCounts[vdiff.StoppedState] > 0 {
+ summary.State = vdiff.StoppedState
+ } else if shardStateCounts[vdiff.ErrorState] > 0 || tableStateCounts[vdiff.ErrorState] > 0 {
+ summary.State = vdiff.ErrorState
+ } else if tableStateCounts[vdiff.StartedState] > 0 {
+ summary.State = vdiff.StartedState
+ } else if tableStateCounts[vdiff.PendingState] > 0 {
+ summary.State = vdiff.PendingState
+ } else if tableStateCounts[vdiff.CompletedState] == (len(tableSummaryMap) * len(shards)) {
+ // When doing shard consolidations/merges, we cannot rely solely on the
+ // vdiff_table state as there are N sources that we process rows from sequentially
+ // with each one writing to the shared _vt.vdiff_table record for the target shard.
+ // So we only mark the vdiff for the shard as completed when we've finished
+ // processing rows from all of the sources -- which is recorded by marking the
+ // vdiff done for the shard by setting _vt.vdiff.state = completed.
+ if shardStateCounts[vdiff.CompletedState] == len(shards) {
+ summary.State = vdiff.CompletedState
+ } else {
+ summary.State = vdiff.StartedState
+ }
+ } else {
+ summary.State = vdiff.UnknownState
+ }
+
+ // If the vdiff has been started then we can calculate the progress.
+ if summary.State == vdiff.StartedState {
+ summary.Progress = BuildProgressReport(summary.RowsCompared, totalRowsToCompare, summary.StartedAt)
+ }
+
+ sort.Strings(shards) // Sort for predictable output
+ summary.Shards = strings.Join(shards, ",")
+ summary.TableSummaryMap = tableSummaryMap
+ summary.Reports = reports
+ if !summary.HasMismatch && !verbose {
+ summary.Reports = nil
+ summary.TableSummaryMap = nil
+ }
+ // If we haven't completed the global VDiff then be sure to reflect that with no
+ // CompletedAt value.
+ if summary.State != vdiff.CompletedState {
+ summary.CompletedAt = ""
+ }
+ return summary, nil
+}
+
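+// BuildProgressReport computes the percentage of rows compared so far and,
+// once progress reaches at least 1%, an ETA extrapolated from the average
+// time taken per percentage point since startedAt.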
+func BuildProgressReport(rowsCompared int64, rowsToCompare int64, startedAt string) *vdiff.ProgressReport {
+ report := &vdiff.ProgressReport{}
+ if rowsCompared >= 1 {
+ // Round to 2 decimal points.
+ report.Percentage = math.Round(math.Min((float64(rowsCompared)/float64(rowsToCompare))*100, 100.00)*100) / 100
+ }
+ if math.IsNaN(report.Percentage) {
+ report.Percentage = 0
+ }
+ pctToGo := math.Abs(report.Percentage - 100.00)
+ startTime, _ := time.Parse(vdiff.TimestampFormat, startedAt)
+ curTime := time.Now().UTC()
+ runTime := curTime.Unix() - startTime.Unix()
+ if report.Percentage >= 1 {
+ // Calculate how long 1% took, on avg, and multiply that by the % left.
+ eta := time.Unix(((int64(runTime)/int64(report.Percentage))*int64(pctToGo))+curTime.Unix(), 1).UTC()
+ // Cap the ETA at 1 year out to prevent providing nonsensical ETAs.
+ if eta.Before(time.Now().UTC().AddDate(1, 0, 0)) {
+ report.ETA = eta.Format(vdiff.TimestampFormat)
+ }
+ }
+ return report
+}
diff --git a/go/vt/vtctl/workflow/vdiff_test.go b/go/vt/vtctl/workflow/vdiff_test.go
new file mode 100644
index 00000000000..e5578afc170
--- /dev/null
+++ b/go/vt/vtctl/workflow/vdiff_test.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
+)
+
+func TestBuildProgressReport(t *testing.T) {
+ now := time.Now()
+ type args struct {
+ summary *Summary
+ rowsToCompare int64
+ }
+ tests := []struct {
+ name string
+ args args
+ want *vdiff.ProgressReport
+ }{
+ {
+ name: "no progress",
+ args: args{
+ summary: &Summary{RowsCompared: 0},
+ rowsToCompare: 100,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 0,
+ ETA: "", // no ETA
+ },
+ },
+ {
+ name: "one third of the way",
+ args: args{
+ summary: &Summary{
+ RowsCompared: 33,
+ StartedAt: now.Add(-10 * time.Second).UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 100,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 33,
+ ETA: now.Add(20 * time.Second).UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ {
+ name: "half way",
+ args: args{
+ summary: &Summary{
+ RowsCompared: 5000000000,
+ StartedAt: now.Add(-10 * time.Hour).UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 10000000000,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 50,
+ ETA: now.Add(10 * time.Hour).UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ {
+ name: "full progress",
+ args: args{
+ summary: &Summary{
+ RowsCompared: 100,
+ CompletedAt: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 100,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 100,
+ ETA: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ {
+ name: "more than in I_S",
+ args: args{
+ summary: &Summary{
+ RowsCompared: 100,
+ CompletedAt: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 50,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 100,
+ ETA: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.args.summary.Progress = BuildProgressReport(tt.args.summary.RowsCompared, tt.args.rowsToCompare, tt.args.summary.StartedAt)
+ // We always check the percentage
+ require.Equal(t, int(tt.want.Percentage), int(tt.args.summary.Progress.Percentage))
+
+ // We only check the ETA if there is one.
+ if tt.want.ETA != "" {
+ // Let's check that we're within 1 second to avoid flakes.
+ wantTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA)
+ require.NoError(t, err)
+ var timeDiff float64
+ if tt.want.Percentage == 100 {
+ completedTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.CompletedAt)
+ require.NoError(t, err)
+ timeDiff = math.Abs(completedTime.Sub(wantTime).Seconds())
+ } else {
+ startTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.StartedAt)
+ require.NoError(t, err)
+ completedTimeUnix := float64(now.UTC().Unix()-startTime.UTC().Unix()) * (100 / tt.want.Percentage)
+ estimatedTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA)
+ require.NoError(t, err)
+ timeDiff = math.Abs(estimatedTime.Sub(startTime).Seconds() - completedTimeUnix)
+ }
+ require.LessOrEqual(t, timeDiff, 1.0)
+ }
+ })
+ }
+}
diff --git a/go/vt/vterrors/code.go b/go/vt/vterrors/code.go
index 31c98cef280..0ca275b71fc 100644
--- a/go/vt/vterrors/code.go
+++ b/go/vt/vterrors/code.go
@@ -102,6 +102,7 @@ var (
VT09028 = errorWithState("VT09028", vtrpcpb.Code_FAILED_PRECONDITION, CTERecursiveForbiddenJoinOrder, "In recursive query block of Recursive Common Table Expression '%s', the recursive table must neither be in the right argument of a LEFT JOIN, nor be forced to be non-first with join order hints", "")
VT09029 = errorWithState("VT09029", vtrpcpb.Code_FAILED_PRECONDITION, CTERecursiveRequiresSingleReference, "In recursive query block of Recursive Common Table Expression %s, the recursive table must be referenced only once, and not in any subquery", "")
VT09030 = errorWithState("VT09030", vtrpcpb.Code_FAILED_PRECONDITION, CTEMaxRecursionDepth, "Recursive query aborted after 1000 iterations.", "")
+ VT09031 = errorWithoutState("VT09031", vtrpcpb.Code_FAILED_PRECONDITION, "Primary demotion is stalled", "")
VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.")
VT10002 = errorWithoutState("VT10002", vtrpcpb.Code_ABORTED, "atomic distributed transaction not allowed: %s", "The distributed transaction cannot be committed. A rollback decision is taken.")
@@ -192,6 +193,8 @@ var (
VT09027,
VT09028,
VT09029,
+ VT09030,
+ VT09031,
VT10001,
VT10002,
VT12001,
diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go
index f9ae8be3820..d45073cd006 100644
--- a/go/vt/vtexplain/vtexplain_vtgate.go
+++ b/go/vt/vtexplain/vtexplain_vtgate.go
@@ -74,7 +74,7 @@ func (vte *VTExplain) initVtgateExecutor(ctx context.Context, ts *topo.Server, v
var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests
queryLogBufferSize := 10
plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false)
- vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.env, vte.explainTopo, Cell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0)
+ vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.env, vte.explainTopo, Cell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0, vtgate.NewDynamicViperConfig())
vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize))
return nil
@@ -88,7 +88,9 @@ func (vte *VTExplain) newFakeResolver(ctx context.Context, opts *Options, serv s
if opts.ExecutionMode == ModeTwoPC {
txMode = vtgatepb.TransactionMode_TWOPC
}
- tc := vtgate.NewTxConn(gw, txMode)
+ tc := vtgate.NewTxConn(gw, &vtgate.StaticConfig{
+ TxMode: txMode,
+ })
sc := vtgate.NewScatterConn("", tc, gw)
srvResolver := srvtopo.NewResolver(serv, gw, cell)
return vtgate.NewResolver(srvResolver, serv, cell, sc)
diff --git a/go/vt/vtgate/dynamicconfig/config.go b/go/vt/vtgate/dynamicconfig/config.go
index 5bb1d991eae..014160029cd 100644
--- a/go/vt/vtgate/dynamicconfig/config.go
+++ b/go/vt/vtgate/dynamicconfig/config.go
@@ -1,6 +1,28 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package dynamicconfig
+import vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+
type DDL interface {
OnlineEnabled() bool
DirectEnabled() bool
}
+
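+// TxMode exposes the transaction mode to use when a session does not
+// specify one; implementations may resolve it dynamically at runtime.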
+type TxMode interface {
+ TransactionMode() vtgatepb.TransactionMode
+}
diff --git a/go/vt/vtgate/engine/fake_primitive_test.go b/go/vt/vtgate/engine/fake_primitive_test.go
index b878c1931c0..f3ab5ad5336 100644
--- a/go/vt/vtgate/engine/fake_primitive_test.go
+++ b/go/vt/vtgate/engine/fake_primitive_test.go
@@ -40,7 +40,8 @@ type fakePrimitive struct {
// sendErr is sent at the end of the stream if it's set.
sendErr error
- log []string
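+ // noLog disables logging in TryStreamExecute so that concurrent
+ // streaming calls do not race on the shared log slice.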
+ noLog bool
+ log []string
allResultsInOneCall bool
@@ -85,7 +86,9 @@ func (f *fakePrimitive) TryExecute(ctx context.Context, vcursor VCursor, bindVar
}
func (f *fakePrimitive) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
- f.log = append(f.log, fmt.Sprintf("StreamExecute %v %v", printBindVars(bindVars), wantfields))
+ if !f.noLog {
+ f.log = append(f.log, fmt.Sprintf("StreamExecute %v %v", printBindVars(bindVars), wantfields))
+ }
if f.results == nil {
return f.sendErr
}
diff --git a/go/vt/vtgate/engine/semi_join.go b/go/vt/vtgate/engine/semi_join.go
index f0dd0d09033..b5bc74a5941 100644
--- a/go/vt/vtgate/engine/semi_join.go
+++ b/go/vt/vtgate/engine/semi_join.go
@@ -18,6 +18,7 @@ package engine
import (
"context"
+ "sync/atomic"
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -62,24 +63,26 @@ func (jn *SemiJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma
// TryStreamExecute performs a streaming exec.
func (jn *SemiJoin) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error {
- joinVars := make(map[string]*querypb.BindVariable)
err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, wantfields, func(lresult *sqltypes.Result) error {
+ joinVars := make(map[string]*querypb.BindVariable)
result := &sqltypes.Result{Fields: lresult.Fields}
for _, lrow := range lresult.Rows {
for k, col := range jn.Vars {
joinVars[k] = sqltypes.ValueBindVariable(lrow[col])
}
- rowAdded := false
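+ // The right-hand primitive may deliver streaming results concurrently,
+ // so rowAdded is an atomic rather than a plain bool.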
+ var rowAdded atomic.Bool
err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), false, func(rresult *sqltypes.Result) error {
- if len(rresult.Rows) > 0 && !rowAdded {
- result.Rows = append(result.Rows, lrow)
- rowAdded = true
+ if len(rresult.Rows) > 0 {
+ rowAdded.Store(true)
}
return nil
})
if err != nil {
return err
}
+ if rowAdded.Load() {
+ result.Rows = append(result.Rows, lrow)
+ }
}
return callback(result)
})
diff --git a/go/vt/vtgate/engine/semi_join_test.go b/go/vt/vtgate/engine/semi_join_test.go
index 8fee0490415..a103b0686b2 100644
--- a/go/vt/vtgate/engine/semi_join_test.go
+++ b/go/vt/vtgate/engine/semi_join_test.go
@@ -18,6 +18,7 @@ package engine
import (
"context"
+ "sync"
"testing"
"vitess.io/vitess/go/test/utils"
@@ -159,3 +160,81 @@ func TestSemiJoinStreamExecute(t *testing.T) {
"4|d|dd",
))
}
+
+// TestSemiJoinStreamExecuteParallelExecution tests SemiJoin stream execution with parallel execution
+// to ensure we have no data races.
+func TestSemiJoinStreamExecuteParallelExecution(t *testing.T) {
+ leftPrim := &fakePrimitive{
+ results: []*sqltypes.Result{
+ sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "col1|col2|col3",
+ "int64|varchar|varchar",
+ ),
+ "1|a|aa",
+ "2|b|bb",
+ ), sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "col1|col2|col3",
+ "int64|varchar|varchar",
+ ),
+ "3|c|cc",
+ "4|d|dd",
+ ),
+ },
+ async: true,
+ }
+ rightFields := sqltypes.MakeTestFields(
+ "col4|col5|col6",
+ "int64|varchar|varchar",
+ )
+ rightPrim := &fakePrimitive{
+ // With async execution the fake streams all of these results on every
+ // call, so every left row finds a match.
+ results: sqltypes.MakeTestStreamingResults(rightFields,
+ "4|d|dd",
+ "---",
+ "---",
+ "5|e|ee",
+ "6|f|ff",
+ "7|g|gg",
+ ),
+ async: true,
+ noLog: true,
+ }
+
+ jn := &SemiJoin{
+ Left: leftPrim,
+ Right: rightPrim,
+ Vars: map[string]int{
+ "bv": 1,
+ },
+ }
+ var res *sqltypes.Result
+ var mu sync.Mutex
+ err := jn.TryStreamExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error {
+ mu.Lock()
+ defer mu.Unlock()
+ if res == nil {
+ res = result
+ } else {
+ res.Rows = append(res.Rows, result.Rows...)
+ }
+ return nil
+ })
+ require.NoError(t, err)
+ leftPrim.ExpectLog(t, []string{
+ `StreamExecute true`,
+ })
+ // We expect all of the left primitive's rows back, since the right
+ // primitive streams the same non-empty result set for every call,
+ // making every left row qualify.
+ expectResultAnyOrder(t, res, sqltypes.MakeTestResult(
+ sqltypes.MakeTestFields(
+ "col1|col2|col3",
+ "int64|varchar|varchar",
+ ),
+ "1|a|aa",
+ "2|b|bb",
+ "3|c|cc",
+ "4|d|dd",
+ ))
+}
diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go
index ea3a7a603ce..7396529dcf1 100644
--- a/go/vt/vtgate/evalengine/compiler_test.go
+++ b/go/vt/vtgate/evalengine/compiler_test.go
@@ -740,6 +740,26 @@ func TestCompilerSingle(t *testing.T) {
expression: `cast(_utf32 0x0000FF as binary)`,
result: `VARBINARY("\x00\x00\x00\xff")`,
},
+ {
+ expression: `DATE_FORMAT(timestamp '2024-12-30 10:34:58', "%u")`,
+ result: `VARCHAR("53")`,
+ },
+ {
+ expression: `WEEK(timestamp '2024-12-30 10:34:58', 0)`,
+ result: `INT64(52)`,
+ },
+ {
+ expression: `WEEK(timestamp '2024-12-30 10:34:58', 1)`,
+ result: `INT64(53)`,
+ },
+ {
+ expression: `WEEK(timestamp '2024-01-01 10:34:58', 0)`,
+ result: `INT64(0)`,
+ },
+ {
+ expression: `WEEK(timestamp '2024-01-01 10:34:58', 1)`,
+ result: `INT64(1)`,
+ },
}
tz, _ := time.LoadLocation("Europe/Madrid")
diff --git a/go/vt/vtgate/evalengine/testcases/inputs.go b/go/vt/vtgate/evalengine/testcases/inputs.go
index ac23281fd54..4c65cc5002f 100644
--- a/go/vt/vtgate/evalengine/testcases/inputs.go
+++ b/go/vt/vtgate/evalengine/testcases/inputs.go
@@ -108,6 +108,7 @@ var inputConversions = []string{
"time '10:04:58'", "time '31:34:58'", "time '32:34:58'", "time '130:34:58'", "time '5 10:34:58'",
"time '10:04:58.1'", "time '31:34:58.4'", "time '32:34:58.5'", "time '130:34:58.6'", "time '5 10:34:58.9'", "date '2000-01-01'",
"timestamp '2000-01-01 10:34:58'", "timestamp '2000-01-01 10:34:58.123456'", "timestamp '2000-01-01 10:34:58.978654'",
+ "timestamp '2024-12-30 10:34:58'",
"20000101103458", "20000101103458.1234", "20000101103458.123456", "20000101", "103458", "103458.123456",
"'20000101103458'", "'20000101103458.1234'", "'20000101103458.123456'", "'20000101'", "'103458'", "'103458.123456'",
"'20000101103458foo'", "'20000101103458.1234foo'", "'20000101103458.123456foo'", "'20000101foo'", "'103458foo'", "'103458.123456foo'",
diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go
index d3d2ba8e8fd..0281e28700f 100644
--- a/go/vt/vtgate/executor.go
+++ b/go/vt/vtgate/executor.go
@@ -31,6 +31,7 @@ import (
"github.com/spf13/pflag"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/cache/theine"
@@ -136,7 +137,8 @@ type Executor struct {
warmingReadsPercent int
warmingReadsChannel chan bool
- vConfig econtext.VCursorConfig
+ vConfig econtext.VCursorConfig
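+ // ddlConfig determines whether online and direct DDL are currently enabled.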
+ ddlConfig dynamicconfig.DDL
}
var executorOnce sync.Once
@@ -168,6 +170,7 @@ func NewExecutor(
noScatter bool,
pv plancontext.PlannerVersion,
warmingReadsPercent int,
+ ddlConfig dynamicconfig.DDL,
) *Executor {
e := &Executor{
env: env,
@@ -183,6 +186,7 @@ func NewExecutor(
plans: plans,
warmingReadsPercent: warmingReadsPercent,
warmingReadsChannel: make(chan bool, warmingReadsConcurrency),
+ ddlConfig: ddlConfig,
}
// setting the vcursor config.
e.initVConfig(warnOnShardedOnly, pv)
@@ -484,7 +488,7 @@ func (e *Executor) addNeededBindVars(vcursor *econtext.VCursorImpl, bindVarNeeds
case sysvars.TransactionMode.Name:
txMode := session.TransactionMode
if txMode == vtgatepb.TransactionMode_UNSPECIFIED {
- txMode = getTxMode()
+ txMode = transactionMode.Get()
}
bindVars[key] = sqltypes.StringBindVariable(txMode.String())
case sysvars.Workload.Name:
@@ -1156,11 +1160,7 @@ func (e *Executor) buildStatement(
reservedVars *sqlparser.ReservedVars,
bindVarNeeds *sqlparser.BindVarNeeds,
) (*engine.Plan, error) {
- cfg := &dynamicViperConfig{
- onlineDDL: enableOnlineDDL,
- directDDL: enableDirectDDL,
- }
- plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, cfg)
+ plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, e.ddlConfig)
if err != nil {
return nil, err
}
diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go
index 2ee3425209f..43987217039 100644
--- a/go/vt/vtgate/executor_framework_test.go
+++ b/go/vt/vtgate/executor_framework_test.go
@@ -183,7 +183,7 @@ func createExecutorEnvCallback(t testing.TB, eachShard func(shard, ks string, ta
// one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness.
plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false)
- executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
key.AnyShardPicker = DestinationAnyShardPickerFirstShard{}
@@ -232,7 +232,7 @@ func createCustomExecutor(t testing.TB, vschema string, mysqlVersion string) (ex
plans := DefaultPlanCache()
env, err := vtenv.New(vtenv.Options{MySQLServerVersion: mysqlVersion})
require.NoError(t, err)
- executor = NewExecutor(ctx, env, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ executor = NewExecutor(ctx, env, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
t.Cleanup(func() {
@@ -269,7 +269,7 @@ func createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqlty
sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil)
queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)
plans := DefaultPlanCache()
- executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
t.Cleanup(func() {
@@ -294,7 +294,7 @@ func createExecutorEnvWithPrimaryReplicaConn(t testing.TB, ctx context.Context,
replica = hc.AddTestTablet(cell, "0-replica", 1, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil)
queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)
- executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent)
+ executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
t.Cleanup(func() {
diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go
index 411f19bb30d..16628729ac6 100644
--- a/go/vt/vtgate/executor_select_test.go
+++ b/go/vt/vtgate/executor_select_test.go
@@ -1644,7 +1644,7 @@ func TestSelectListArg(t *testing.T) {
func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor {
queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)
plans := DefaultPlanCache()
- ex := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ ex := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
ex.SetQueryLogger(queryLogger)
return ex
}
@@ -3326,7 +3326,7 @@ func TestStreamOrderByWithMultipleResults(t *testing.T) {
}
queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)
plans := DefaultPlanCache()
- executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
defer executor.Close()
// some sleep for all goroutines to start
@@ -3369,7 +3369,7 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) {
}
queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)
plans := DefaultPlanCache()
- executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
defer executor.Close()
// some sleep for all goroutines to start
diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go
index a8500dd59c4..8bb10aae8fb 100644
--- a/go/vt/vtgate/executor_stream_test.go
+++ b/go/vt/vtgate/executor_stream_test.go
@@ -68,7 +68,7 @@ func TestStreamSQLSharded(t *testing.T) {
queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)
plans := DefaultPlanCache()
- executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0)
+ executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, NewDynamicViperConfig())
executor.SetQueryLogger(queryLogger)
defer executor.Close()
diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go
index e31c5ae8161..fecd6c2a8b1 100644
--- a/go/vt/vtgate/legacy_scatter_conn_test.go
+++ b/go/vt/vtgate/legacy_scatter_conn_test.go
@@ -522,7 +522,7 @@ func TestScatterConnSingleDB(t *testing.T) {
assert.Contains(t, errors[0].Error(), want)
// TransactionMode_SINGLE in txconn
- sc.txConn.mode = vtgatepb.TransactionMode_SINGLE
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_SINGLE}
session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{}, false)
require.Empty(t, errors)
@@ -531,7 +531,7 @@ func TestScatterConnSingleDB(t *testing.T) {
assert.Contains(t, errors[0].Error(), want)
// TransactionMode_MULTI in txconn. Should not fail.
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
session = econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
_, errors = sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false, nullResultsObserver{}, false)
require.Empty(t, errors)
@@ -622,6 +622,8 @@ func newTestScatterConn(ctx context.Context, hc discovery.HealthCheck, serv srvt
// in '-cells_to_watch' command line parameter, which is
// empty by default. So it's unused in this test, set to nil.
gw := NewTabletGateway(ctx, hc, serv, cell)
- tc := NewTxConn(gw, vtgatepb.TransactionMode_MULTI)
+ tc := NewTxConn(gw, &StaticConfig{
+ TxMode: vtgatepb.TransactionMode_MULTI,
+ })
return NewScatterConn("", tc, gw)
}
diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go
index edba48a9151..85f236a9a18 100644
--- a/go/vt/vtgate/scatter_conn.go
+++ b/go/vt/vtgate/scatter_conn.go
@@ -685,7 +685,7 @@ func (stc *ScatterConn) multiGoTransaction(
startTime, statsKey := stc.startAction(name, rs.Target)
defer stc.endAction(startTime, allErrors, statsKey, &err, session)
- info, shardSession, err := actionInfo(ctx, rs.Target, session, autocommit, stc.txConn.mode)
+ info, shardSession, err := actionInfo(ctx, rs.Target, session, autocommit, stc.txConn.txMode.TransactionMode())
if err != nil {
return
}
@@ -702,7 +702,7 @@ func (stc *ScatterConn) multiGoTransaction(
shardSession.RowsAffected = info.rowsAffected
}
if info.actionNeeded != nothing && (info.transactionID != 0 || info.reservedID != 0) {
- appendErr := session.AppendOrUpdate(rs.Target, info, shardSession, stc.txConn.mode)
+ appendErr := session.AppendOrUpdate(rs.Target, info, shardSession, stc.txConn.txMode.TransactionMode())
if appendErr != nil {
err = appendErr
}
diff --git a/go/vt/vtgate/static_config.go b/go/vt/vtgate/static_config.go
new file mode 100644
index 00000000000..f78545ebc5b
--- /dev/null
+++ b/go/vt/vtgate/static_config.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtgate
+
+import vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+
+// StaticConfig is a static configuration for vtgate.
+// It is used for tests and vtexplain_vtgate where we don't want the user to
+// control certain configs.
+type StaticConfig struct {
+ OnlineDDLEnabled bool
+ DirectDDLEnabled bool
+ TxMode vtgatepb.TransactionMode
+}
+
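+// OnlineEnabled implements the dynamicconfig.DDL interface.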
+func (s *StaticConfig) OnlineEnabled() bool {
+ return s.OnlineDDLEnabled
+}
+
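+// DirectEnabled implements the dynamicconfig.DDL interface.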
+func (s *StaticConfig) DirectEnabled() bool {
+ return s.DirectDDLEnabled
+}
+
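+// TransactionMode implements the dynamicconfig.TxMode interface.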
+func (s *StaticConfig) TransactionMode() vtgatepb.TransactionMode {
+ return s.TxMode
+}
diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go
index cadb1392eca..dbd76b04c7a 100644
--- a/go/vt/vtgate/tx_conn.go
+++ b/go/vt/vtgate/tx_conn.go
@@ -33,6 +33,7 @@ import (
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/dynamicconfig"
econtext "vitess.io/vitess/go/vt/vtgate/executorcontext"
"vitess.io/vitess/go/vt/vttablet/queryservice"
)
@@ -44,14 +45,14 @@ const nonAtomicCommitWarnMaxShards = 16
// TxConn is used for executing transactional requests.
type TxConn struct {
tabletGateway *TabletGateway
- mode vtgatepb.TransactionMode
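+ // txMode is resolved at call time, so transaction-mode configuration
+ // changes can take effect without restarting the process.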
+ txMode dynamicconfig.TxMode
}
// NewTxConn builds a new TxConn.
-func NewTxConn(gw *TabletGateway, txMode vtgatepb.TransactionMode) *TxConn {
+func NewTxConn(gw *TabletGateway, txMode dynamicconfig.TxMode) *TxConn {
return &TxConn{
tabletGateway: gw,
- mode: txMode,
+ txMode: txMode,
}
}
@@ -114,7 +115,7 @@ func (txc *TxConn) Commit(ctx context.Context, session *econtext.SafeSession) er
case vtgatepb.TransactionMode_TWOPC:
twopc = true
case vtgatepb.TransactionMode_UNSPECIFIED:
- twopc = txc.mode == vtgatepb.TransactionMode_TWOPC
+ twopc = txc.txMode.TransactionMode() == vtgatepb.TransactionMode_TWOPC
}
defer recordCommitTime(session, twopc, time.Now())
diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go
index d96f0b8fccf..6d31aa4e543 100644
--- a/go/vt/vtgate/tx_conn_test.go
+++ b/go/vt/vtgate/tx_conn_test.go
@@ -72,7 +72,7 @@ func TestTxConnCommitFailure(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbcs, rssm, rssa := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 3)
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"]
// Sequence the executes to ensure commit order
@@ -173,7 +173,7 @@ func TestTxConnCommitFailureAfterNonAtomicCommitMaxShards(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbcs, rssm, _ := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 18)
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"]
// Sequence the executes to ensure commit order
@@ -227,7 +227,7 @@ func TestTxConnCommitFailureBeforeNonAtomicCommitMaxShards(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbcs, rssm, _ := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 17)
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"]
// Sequence the executes to ensure commit order
@@ -281,7 +281,7 @@ func TestTxConnCommitSuccess(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
// Sequence the executes to ensure commit order
session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true})
@@ -334,7 +334,7 @@ func TestTxConnReservedCommitSuccess(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
// Sequence the executes to ensure commit order
session := econtext.NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true})
@@ -419,7 +419,7 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) {
keyspace := "TestTxConn"
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace)
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
// Sequence the executes to ensure shard session order
session := econtext.NewSafeSession(&vtgatepb.Session{InReservedConn: true})
@@ -514,7 +514,7 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) {
keyspace := "TestTxConn"
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace)
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
// Sequence the executes to ensure shard session order
session := econtext.NewSafeSession(&vtgatepb.Session{InReservedConn: true})
@@ -608,7 +608,7 @@ func TestTxConnCommitOrderFailure1(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
queries := []*querypb.BoundQuery{{Sql: "query1"}}
@@ -641,7 +641,7 @@ func TestTxConnCommitOrderFailure2(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
queries := []*querypb.BoundQuery{{
Sql: "query1",
@@ -675,7 +675,7 @@ func TestTxConnCommitOrderFailure3(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
queries := []*querypb.BoundQuery{{
Sql: "query1",
@@ -717,7 +717,7 @@ func TestTxConnCommitOrderSuccess(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
queries := []*querypb.BoundQuery{{
Sql: "query1",
@@ -815,7 +815,7 @@ func TestTxConnReservedCommitOrderSuccess(t *testing.T) {
ctx := utils.LeakCheckContext(t)
sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn")
- sc.txConn.mode = vtgatepb.TransactionMode_MULTI
+ sc.txConn.txMode = &StaticConfig{TxMode: vtgatepb.TransactionMode_MULTI}
queries := []*querypb.BoundQuery{{
Sql: "query1",
diff --git a/go/vt/vtgate/viper_config.go b/go/vt/vtgate/viper_config.go
new file mode 100644
index 00000000000..68430b7be2c
--- /dev/null
+++ b/go/vt/vtgate/viper_config.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vtgate
+
+import (
+ "vitess.io/vitess/go/viperutil"
+ vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
+)
+
+// DynamicViperConfig is a dynamic config that uses viper.
+type DynamicViperConfig struct {
+ onlineDDL viperutil.Value[bool]
+ directDDL viperutil.Value[bool]
+ txMode viperutil.Value[vtgatepb.TransactionMode]
+}
+
+// NewDynamicViperConfig creates a new dynamic viper config.
+func NewDynamicViperConfig() *DynamicViperConfig {
+ return &DynamicViperConfig{
+ onlineDDL: enableOnlineDDL,
+ directDDL: enableDirectDDL,
+ txMode: transactionMode,
+ }
+}
+
+func (d *DynamicViperConfig) OnlineEnabled() bool {
+ return d.onlineDDL.Get()
+}
+
+func (d *DynamicViperConfig) DirectEnabled() bool {
+ return d.directDDL.Get()
+}
+
+func (d *DynamicViperConfig) TransactionMode() vtgatepb.TransactionMode {
+ return d.txMode.Get()
+}
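
Because each field is a viperutil.Value, every Get() reads the live value, so a change to a flag registered with Dynamic: true is observed without a restart. A sketch of the wiring that Init performs further down in this patch:

package vtgate

// Sketch of the wiring Init performs: TxConn reads the mode lazily via the
// dynamic config, so a runtime change to transaction_mode takes effect on
// the next commit decision without restarting vtgate.
func wireDynamicTxMode(gw *TabletGateway) *TxConn {
	cfg := NewDynamicViperConfig()
	return NewTxConn(gw, cfg) // cfg.TransactionMode() is evaluated per call
}
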
diff --git a/go/vt/vtgate/viperconfig.go b/go/vt/vtgate/viperconfig.go
deleted file mode 100644
index ec77ff62d4f..00000000000
--- a/go/vt/vtgate/viperconfig.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package vtgate
-
-import "vitess.io/vitess/go/viperutil"
-
-type dynamicViperConfig struct {
- onlineDDL viperutil.Value[bool]
- directDDL viperutil.Value[bool]
-}
-
-func (d *dynamicViperConfig) OnlineEnabled() bool {
- return d.onlineDDL.Get()
-}
-
-func (d *dynamicViperConfig) DirectEnabled() bool {
- return d.directDDL.Get()
-}
diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go
index 8bab05479dd..a1dcd3219f6 100644
--- a/go/vt/vtgate/vtgate.go
+++ b/go/vt/vtgate/vtgate.go
@@ -29,6 +29,7 @@ import (
"time"
"github.com/spf13/pflag"
+ "github.com/spf13/viper"
"vitess.io/vitess/go/acl"
"vitess.io/vitess/go/sqltypes"
@@ -60,7 +61,6 @@ import (
)
var (
- transactionMode = "MULTI"
normalizeQueries = true
streamBufferSize = 32 * 1024
@@ -114,6 +114,33 @@ var (
},
)
+ transactionMode = viperutil.Configure(
+ "transaction_mode",
+ viperutil.Options[vtgatepb.TransactionMode]{
+ FlagName: "transaction_mode",
+ Default: vtgatepb.TransactionMode_MULTI,
+ Dynamic: true,
+ GetFunc: func(v *viper.Viper) func(key string) vtgatepb.TransactionMode {
+ return func(key string) vtgatepb.TransactionMode {
+ txMode := v.GetString(key)
+ switch strings.ToLower(txMode) {
+ case "single":
+ return vtgatepb.TransactionMode_SINGLE
+ case "multi":
+ return vtgatepb.TransactionMode_MULTI
+ case "twopc":
+ return vtgatepb.TransactionMode_TWOPC
+ default:
+ fmt.Printf("Invalid option: %v\n", txMode)
+ fmt.Println("Usage: -transaction_mode {SINGLE | MULTI | TWOPC}")
+ os.Exit(1)
+ return -1
+ }
+ }
+ },
+ },
+ )
+
// schema tracking flags
enableSchemaChangeSignal = true
enableViews bool
@@ -138,7 +165,7 @@ var (
)
func registerFlags(fs *pflag.FlagSet) {
- fs.StringVar(&transactionMode, "transaction_mode", transactionMode, "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit")
+ fs.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit")
fs.BoolVar(&normalizeQueries, "normalize_queries", normalizeQueries, "Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars.")
fs.BoolVar(&terseErrors, "vtgate-config-terse-errors", terseErrors, "prevent bind vars from escaping in returned errors")
fs.IntVar(&truncateErrorLen, "truncate-error-len", truncateErrorLen, "truncate errors sent to client if they are longer than this value (0 means do not truncate)")
@@ -173,7 +200,11 @@ func registerFlags(fs *pflag.FlagSet) {
fs.IntVar(&warmingReadsConcurrency, "warming-reads-concurrency", 500, "Number of concurrent warming reads allowed")
fs.DurationVar(&warmingReadsQueryTimeout, "warming-reads-query-timeout", 5*time.Second, "Timeout of warming read queries")
- viperutil.BindFlags(fs, enableOnlineDDL, enableDirectDDL)
+ viperutil.BindFlags(fs,
+ enableOnlineDDL,
+ enableDirectDDL,
+ transactionMode,
+ )
}
func init() {
@@ -181,25 +212,6 @@ func init() {
servenv.OnParseFor("vtcombo", registerFlags)
}
-func getTxMode() vtgatepb.TransactionMode {
- switch strings.ToLower(transactionMode) {
- case "single":
- log.Infof("Transaction mode: '%s'", transactionMode)
- return vtgatepb.TransactionMode_SINGLE
- case "multi":
- log.Infof("Transaction mode: '%s'", transactionMode)
- return vtgatepb.TransactionMode_MULTI
- case "twopc":
- log.Infof("Transaction mode: '%s'", transactionMode)
- return vtgatepb.TransactionMode_TWOPC
- default:
- fmt.Printf("Invalid option: %v\n", transactionMode)
- fmt.Println("Usage: -transaction_mode {SINGLE | MULTI | TWOPC}")
- os.Exit(1)
- return -1
- }
-}
-
var (
// vschemaCounters needs to be initialized before planner to
// catch the initial load stats.
@@ -287,6 +299,8 @@ func Init(
log.Fatalf("tabletGateway.WaitForTablets failed: %v", err)
}
+ dynamicConfig := NewDynamicViperConfig()
+
// If we want to filter keyspaces replace the srvtopo.Server with a
// filtering server
if discovery.FilteringKeyspaces() {
@@ -301,7 +315,7 @@ func Init(
if _, err := schema.ParseDDLStrategy(defaultDDLStrategy); err != nil {
log.Fatalf("Invalid value for -ddl_strategy: %v", err.Error())
}
- tc := NewTxConn(gw, getTxMode())
+ tc := NewTxConn(gw, dynamicConfig)
// ScatterConn depends on TxConn to perform forced rollbacks.
sc := NewScatterConn("VttabletCall", tc, gw)
// TxResolver depends on TxConn to complete distributed transaction.
@@ -352,6 +366,7 @@ func Init(
noScatter,
pv,
warmingReadsPercent,
+ dynamicConfig,
)
if err := executor.defaultQueryLogger(); err != nil {
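
For reference, the string-to-enum mapping that the viper GetFunc above implements, factored out as a plain function. parseTxMode is a hypothetical stand-in, not part of the patch; the real closure additionally exits the process on bad input:

package vtgate

import (
	"strings"

	vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
)

// parseTxMode maps the case-insensitive flag value to the proto enum,
// reporting false for unrecognized input.
func parseTxMode(s string) (vtgatepb.TransactionMode, bool) {
	switch strings.ToLower(s) {
	case "single":
		return vtgatepb.TransactionMode_SINGLE, true
	case "multi":
		return vtgatepb.TransactionMode_MULTI, true
	case "twopc":
		return vtgatepb.TransactionMode_TWOPC, true
	default:
		return vtgatepb.TransactionMode_UNSPECIFIED, false
	}
}
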
diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go
index f997dc6ac0a..21375fb8eb3 100644
--- a/go/vt/vtorc/db/generate_base.go
+++ b/go/vt/vtorc/db/generate_base.go
@@ -69,10 +69,8 @@ CREATE TABLE database_instance (
last_sql_error TEXT not null default '',
last_io_error TEXT not null default '',
oracle_gtid TINYint not null default 0,
- mariadb_gtid TINYint not null default 0,
relay_log_file varchar(128) not null default '',
relay_log_pos bigint not null default 0,
- pseudo_gtid TINYint not null default 0,
replication_depth TINYint not null default 0,
has_replication_filters TINYint not null default 0,
data_center varchar(32) not null default '',
diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go
index 3e9e81c5c9f..fa2e1a4ec95 100644
--- a/go/vt/vtorc/inst/analysis.go
+++ b/go/vt/vtorc/inst/analysis.go
@@ -108,7 +108,6 @@ type ReplicationAnalysis struct {
Description string
StructureAnalysis []StructureAnalysisCode
OracleGTIDImmediateTopology bool
- MariaDBGTIDImmediateTopology bool
BinlogServerImmediateTopology bool
SemiSyncPrimaryEnabled bool
SemiSyncPrimaryStatus bool
diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go
index 07830bf7dda..5b75ae80672 100644
--- a/go/vt/vtorc/inst/analysis_dao.go
+++ b/go/vt/vtorc/inst/analysis_dao.go
@@ -183,17 +183,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna
),
0
) AS count_valid_semi_sync_replicas,
- MIN(
- primary_instance.mariadb_gtid
- ) AS is_mariadb_gtid,
- SUM(replica_instance.mariadb_gtid) AS count_mariadb_gtid_replicas,
- IFNULL(
- SUM(
- replica_instance.last_checked <= replica_instance.last_seen
- AND replica_instance.mariadb_gtid != 0
- ),
- 0
- ) AS count_valid_mariadb_gtid_replicas,
IFNULL(
SUM(
replica_instance.log_bin
@@ -339,8 +328,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna
countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas")
a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0
- countValidMariaDBGTIDReplicas := m.GetUint("count_valid_mariadb_gtid_replicas")
- a.MariaDBGTIDImmediateTopology = countValidMariaDBGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0
countValidBinlogServerReplicas := m.GetUint("count_valid_binlog_server_replicas")
a.BinlogServerImmediateTopology = countValidBinlogServerReplicas == a.CountValidReplicas && a.CountValidReplicas > 0
a.SemiSyncPrimaryEnabled = m.GetBool("semi_sync_primary_enabled")
@@ -541,7 +528,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna
}
if a.IsPrimary && a.CountReplicas > 1 &&
!a.OracleGTIDImmediateTopology &&
- !a.MariaDBGTIDImmediateTopology &&
!a.BinlogServerImmediateTopology {
a.StructureAnalysis = append(a.StructureAnalysis, NoFailoverSupportStructureWarning)
}
diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go
index c061d54ebb3..0dc24fef7e6 100644
--- a/go/vt/vtorc/inst/analysis_dao_test.go
+++ b/go/vt/vtorc/inst/analysis_dao_test.go
@@ -33,10 +33,10 @@ var (
// The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instance. The relevant insert commands are here.
// This is a dump taken from a test running 4 tablets: zone1-101 is the primary, zone1-100 is a replica, zone1-112 is an rdonly, and zone2-200 is a cross-cell replica.
initialSQL = []string{
- `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`,
- `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`,
- `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`,
- `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`,
+ `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,'vt-0000000112-relay-bin.000002',15815,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`,
+ `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,'vt-0000000100-relay-bin.000002',15815,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`,
+ `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,0,0,'',0,'',0,NULL,NULL,0,'','',0,'',0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`,
+ `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,'vt-0000000200-relay-bin.000002',15815,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`,
`INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
`INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
`INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`,
diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go
index 36f47b7ab0b..fef1e90acce 100644
--- a/go/vt/vtorc/inst/instance.go
+++ b/go/vt/vtorc/inst/instance.go
@@ -56,8 +56,6 @@ type Instance struct {
GTIDMode string
SupportsOracleGTID bool
UsingOracleGTID bool
- UsingMariaDBGTID bool
- UsingPseudoGTID bool // Legacy. Always 'false'
ReadBinlogCoordinates BinlogCoordinates
ExecBinlogCoordinates BinlogCoordinates
IsDetached bool
@@ -134,11 +132,6 @@ func (instance *Instance) MajorVersionString() string {
return strings.Join(instance.MajorVersion(), ".")
}
-// IsMariaDB checks whether this is any version of MariaDB
-func (instance *Instance) IsMariaDB() bool {
- return strings.Contains(instance.Version, "MariaDB")
-}
-
// IsPercona checks whether this is any version of Percona Server
func (instance *Instance) IsPercona() bool {
return strings.Contains(instance.VersionComment, "Percona")
@@ -151,9 +144,6 @@ func (instance *Instance) IsBinlogServer() bool {
// IsOracleMySQL checks whether this is an Oracle MySQL distribution
func (instance *Instance) IsOracleMySQL() bool {
- if instance.IsMariaDB() {
- return false
- }
if instance.IsPercona() {
return false
}
@@ -170,8 +160,6 @@ func (instance *Instance) applyFlavorName() {
}
if instance.IsOracleMySQL() {
instance.FlavorName = "MySQL"
- } else if instance.IsMariaDB() {
- instance.FlavorName = "MariaDB"
} else if instance.IsPercona() {
instance.FlavorName = "Percona"
} else {
@@ -220,7 +208,7 @@ func (instance *Instance) SQLThreadUpToDate() bool {
return instance.ReadBinlogCoordinates.Equals(&instance.ExecBinlogCoordinates)
}
-// UsingGTID returns true when this replica is currently replicating via GTID (either Oracle or MariaDB)
+// UsingGTID returns true when this replica is currently replicating via GTID
func (instance *Instance) UsingGTID() bool {
- return instance.UsingOracleGTID || instance.UsingMariaDBGTID
+ return instance.UsingOracleGTID
}
diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go
index d1421dbc91d..66aef7c8a78 100644
--- a/go/vt/vtorc/inst/instance_dao.go
+++ b/go/vt/vtorc/inst/instance_dao.go
@@ -291,7 +291,6 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named
instance.SQLDelay = fs.ReplicationStatus.SqlDelay
instance.UsingOracleGTID = fs.ReplicationStatus.AutoPosition
- instance.UsingMariaDBGTID = fs.ReplicationStatus.UsingGtid
instance.SourceUUID = fs.ReplicationStatus.SourceUuid
instance.HasReplicationFilters = fs.ReplicationStatus.HasReplicationFilters
@@ -548,7 +547,6 @@ func readInstanceRow(m sqlutils.RowMap) *Instance {
instance.GTIDMode = m.GetString("gtid_mode")
instance.GtidPurged = m.GetString("gtid_purged")
instance.GtidErrant = m.GetString("gtid_errant")
- instance.UsingMariaDBGTID = m.GetBool("mariadb_gtid")
instance.SelfBinlogCoordinates.LogFile = m.GetString("binary_log_file")
instance.SelfBinlogCoordinates.LogPos = m.GetUint32("binary_log_pos")
instance.ReadBinlogCoordinates.LogFile = m.GetString("source_log_file")
@@ -849,8 +847,6 @@ func mkInsertForInstances(instances []*Instance, instanceWasActuallyFound bool,
"gtid_mode",
"gtid_purged",
"gtid_errant",
- "mariadb_gtid",
- "pseudo_gtid",
"source_log_file",
"read_source_log_pos",
"relay_source_log_file",
@@ -930,8 +926,6 @@ func mkInsertForInstances(instances []*Instance, instanceWasActuallyFound bool,
args = append(args, instance.GTIDMode)
args = append(args, instance.GtidPurged)
args = append(args, instance.GtidErrant)
- args = append(args, instance.UsingMariaDBGTID)
- args = append(args, instance.UsingPseudoGTID)
args = append(args, instance.ReadBinlogCoordinates.LogFile)
args = append(args, instance.ReadBinlogCoordinates.LogPos)
args = append(args, instance.ExecBinlogCoordinates.LogFile)
diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go
index cc3217442ed..1a14041450c 100644
--- a/go/vt/vtorc/inst/instance_dao_test.go
+++ b/go/vt/vtorc/inst/instance_dao_test.go
@@ -63,14 +63,14 @@ func TestMkInsertSingle(t *testing.T) {
(alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid,
version, major_version, version_comment, binlog_server, read_only, binlog_format,
binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_net_timeout, heartbeat_interval,
- replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
+ replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant,
source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
VALUES
- (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now'))
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now'))
`
a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT,
FULL, false, false, , 0, , 0, 0, 0,
- false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,`
+ false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,`
sql1, args1, err := mkInsertForInstances(instances[:1], false, true)
require.NoError(t, err)
@@ -86,17 +86,17 @@ func TestMkInsertThree(t *testing.T) {
(alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid,
version, major_version, version_comment, binlog_server, read_only, binlog_format,
binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_net_timeout, heartbeat_interval,
- replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid,
+ replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant,
source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen)
VALUES
- (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')),
- (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')),
- (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now'))
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')),
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now')),
+ (?, ?, ?, DATETIME('now'), DATETIME('now'), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, DATETIME('now'))
`
a3 := `
- zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
- zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
- zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
+ zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
+ zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
+ zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,
`
sql3, args3, err := mkInsertForInstances(instances[:3], true, true)
diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go
index 2a95d3b2b0e..218a679bdb0 100644
--- a/go/vt/vtorc/test/recovery_analysis.go
+++ b/go/vt/vtorc/test/recovery_analysis.go
@@ -62,7 +62,6 @@ type InfoForRecoveryAnalysis struct {
DowntimeEndTimestamp string
DowntimeRemainingSeconds int
CountValidOracleGTIDReplicas uint
- CountValidMariaDBGTIDReplicas uint
CountValidBinlogServerReplicas uint
SemiSyncPrimaryEnabled int
SemiSyncPrimaryStatus int
@@ -94,7 +93,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
rowMap["count_downtimed_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountDowntimedReplicas), Valid: true}
rowMap["count_lagging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountLaggingReplicas), Valid: true}
rowMap["count_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountLoggingReplicas), Valid: true}
- rowMap["count_mariadb_gtid_replicas"] = sqlutils.CellData{Valid: false}
rowMap["count_mixed_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountMixedBasedLoggingReplicas), Valid: true}
rowMap["count_oracle_gtid_replicas"] = sqlutils.CellData{Valid: false}
rowMap["count_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountReplicas), Valid: true}
@@ -102,7 +100,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap {
rowMap["count_semi_sync_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountSemiSyncReplicasEnabled), Valid: true}
rowMap["count_statement_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountStatementBasedLoggingReplicas), Valid: true}
rowMap["count_valid_binlog_server_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidBinlogServerReplicas), Valid: true}
- rowMap["count_valid_mariadb_gtid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidMariaDBGTIDReplicas), Valid: true}
rowMap["count_valid_oracle_gtid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidOracleGTIDReplicas), Valid: true}
rowMap["count_valid_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidReplicas), Valid: true}
rowMap["count_valid_replicating_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountValidReplicatingReplicas), Valid: true}
diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go
index f13efa66124..47794e92b9a 100644
--- a/go/vt/vttablet/tabletmanager/rpc_replication.go
+++ b/go/vt/vttablet/tabletmanager/rpc_replication.go
@@ -19,6 +19,7 @@ package tabletmanager
import (
"context"
"fmt"
+ "runtime"
"strings"
"time"
@@ -29,6 +30,7 @@ import (
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/mysqlctl"
"vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttablet/tabletserver"
@@ -524,6 +526,23 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure
}
defer tm.unlock()
+ finishCtx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+ select {
+ case <-finishCtx.Done():
+ // Finished running DemotePrimary. Nothing to do.
+ case <-time.After(10 * topo.RemoteOperationTimeout):
+ // We have waited more than 10 times the remote operation timeout, but DemotePrimary is still not done.
+ // Collect more information and signal that DemotePrimary is indefinitely stalled.
+ log.Errorf("DemotePrimary seems to be stalled. Collecting more information.")
+ tm.QueryServiceControl.SetDemotePrimaryStalled()
+ buf := make([]byte, 1<<16) // 64 KB buffer size
+ stackSize := runtime.Stack(buf, true)
+ log.Errorf("Stack trace:\n%s", string(buf[:stackSize]))
+ }
+ }()
+
tablet := tm.Tablet()
wasPrimary := tablet.Type == topodatapb.TabletType_PRIMARY
wasServing := tm.QueryServiceControl.IsServing()
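
The DemotePrimary hunk above is an instance of a general watchdog pattern: race a completion signal against a deadline, and dump diagnostics if the deadline wins. A standalone sketch of the same shape, with illustrative names:

package watchdog

import (
	"log"
	"runtime"
	"time"
)

// watchStall races a completion signal against a deadline and dumps all
// goroutine stacks if the deadline wins, mirroring the DemotePrimary
// watchdog above.
func watchStall(done <-chan struct{}, limit time.Duration, onStall func()) {
	select {
	case <-done:
		// Finished in time; nothing to do.
	case <-time.After(limit):
		onStall()
		buf := make([]byte, 1<<16) // 64 KiB; enough for a full dump in practice
		n := runtime.Stack(buf, true)
		log.Printf("operation stalled; goroutine stacks:\n%s", buf[:n])
	}
}
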
diff --git a/go/vt/vttablet/tabletmanager/rpc_replication_test.go b/go/vt/vttablet/tabletmanager/rpc_replication_test.go
index c587f1e24b8..b388235811b 100644
--- a/go/vt/vttablet/tabletmanager/rpc_replication_test.go
+++ b/go/vt/vttablet/tabletmanager/rpc_replication_test.go
@@ -18,10 +18,15 @@ package tabletmanager
import (
"context"
+ "sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/semaphore"
+
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver"
)
// TestWaitForGrantsToHaveApplied tests that waitForGrantsToHaveApplied only succeeds after waitForDBAGrants has been called.
@@ -42,3 +47,49 @@ func TestWaitForGrantsToHaveApplied(t *testing.T) {
err = tm.waitForGrantsToHaveApplied(secondContext)
require.NoError(t, err)
}
+
+type demotePrimaryStallQS struct {
+ tabletserver.Controller
+ waitTime time.Duration
+ primaryStalled atomic.Bool
+}
+
+func (d *demotePrimaryStallQS) SetDemotePrimaryStalled() {
+ d.primaryStalled.Store(true)
+}
+
+func (d *demotePrimaryStallQS) IsServing() bool {
+ time.Sleep(d.waitTime)
+ return false
+}
+
+// TestDemotePrimaryStalled checks that if demote primary takes too long, then we mark it as stalled.
+func TestDemotePrimaryStalled(t *testing.T) {
+ // Set remote operation timeout to a very low value.
+ origVal := topo.RemoteOperationTimeout
+ topo.RemoteOperationTimeout = 100 * time.Millisecond
+ defer func() {
+ topo.RemoteOperationTimeout = origVal
+ }()
+
+ // Create a fake query service control to intercept calls from DemotePrimary function.
+ qsc := &demotePrimaryStallQS{
+ waitTime: 2 * time.Second,
+ }
+ // Create a tablet manager with a replica type tablet.
+ tm := &TabletManager{
+ actionSema: semaphore.NewWeighted(1),
+ MysqlDaemon: newTestMysqlDaemon(t, 1),
+ tmState: &tmState{
+ displayState: displayState{
+ tablet: newTestTablet(t, 100, "ks", "-", map[string]string{}),
+ },
+ },
+ QueryServiceControl: qsc,
+ }
+
+ // We make IsServing stall for 2 seconds, which is longer than 10 * the remote operation timeout (1 second).
+ // This should cause the demote primary operation to be marked as stalled.
+ tm.demotePrimary(context.Background(), false)
+ require.True(t, qsc.primaryStalled.Load())
+}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go
index 12a05a69dbc..9f2647a8770 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go
@@ -166,8 +166,12 @@ func setup(ctx context.Context) (func(), int) {
return cleanup, 0
}
-// We run Tests twice, first with full binlog_row_image, then with noblob.
-var runNoBlobTest = false
+var (
+ // We run unit tests twice, first with binlog_row_image=FULL, then with NOBLOB.
+ runNoBlobTest = false
+ // When using MySQL 8.0 or later, we set binlog_row_value_options=PARTIAL_JSON.
+ runPartialJSONTest = false
+)
// We use this tempDir for creating the external cnfs, since we create the test cluster afterwards.
const tempDir = "/tmp"
@@ -178,10 +182,14 @@ func TestMain(m *testing.M) {
exitCode := func() int {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- if err := utils.SetBinlogRowImageMode("full", tempDir); err != nil {
+ // binlog-row-value-options=PARTIAL_JSON is only supported in MySQL 8.0 and later.
+ // We still run unit tests with MySQL 5.7, so we cannot add it to the cnf file
+ // when using 5.7 or mysqld will fail to start.
+ runPartialJSONTest = utils.CIDBPlatformIsMySQL8orLater()
+ if err := utils.SetBinlogRowImageOptions("full", runPartialJSONTest, tempDir); err != nil {
panic(err)
}
- defer utils.SetBinlogRowImageMode("", tempDir)
+ defer utils.SetBinlogRowImageOptions("", false, tempDir)
cancel, ret := setup(ctx)
if ret > 0 {
return ret
@@ -193,10 +201,10 @@ func TestMain(m *testing.M) {
cancel()
runNoBlobTest = true
- if err := utils.SetBinlogRowImageMode("noblob", tempDir); err != nil {
+ if err := utils.SetBinlogRowImageOptions("noblob", runPartialJSONTest, tempDir); err != nil {
panic(err)
}
- defer utils.SetBinlogRowImageMode("", tempDir)
+ defer utils.SetBinlogRowImageOptions("", false, tempDir)
cancel, ret = setup(ctx)
if ret > 0 {
return ret
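
SetBinlogRowImageOptions (renamed from SetBinlogRowImageMode) now also takes a partial-JSON flag. As a rough guess at its effect, it plausibly appends something like the following to the extra cnf; the helper below is an assumption for illustration, not the patch's implementation:

package utils

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// writeExtraCnf is a guess at what SetBinlogRowImageOptions does with its
// new partialJSON argument; the helper name, file name, and layout here
// are assumptions.
func writeExtraCnf(mode string, partialJSON bool, dir string) error {
	var b strings.Builder
	if mode != "" {
		fmt.Fprintf(&b, "binlog_row_image=%s\n", mode)
	}
	if partialJSON {
		// Only valid on MySQL 8.0+; a 5.7 mysqld refuses to start with it,
		// which is why the caller gates this on CIDBPlatformIsMySQL8orLater.
		b.WriteString("binlog-row-value-options=PARTIAL_JSON\n")
	}
	return os.WriteFile(path.Join(dir, "extra-my.cnf"), []byte(b.String()), 0o644)
}
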
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
index 62d6166b5ca..a201ce25847 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
@@ -17,8 +17,10 @@ limitations under the License.
package vreplication
import (
+ "bytes"
"encoding/json"
"fmt"
+ "slices"
"sort"
"strings"
@@ -28,6 +30,8 @@ import (
"vitess.io/vitess/go/mysql/collations/colldata"
vjson "vitess.io/vitess/go/mysql/json"
"vitess.io/vitess/go/mysql/sqlerror"
+ "vitess.io/vitess/go/ptr"
+ "vitess.io/vitess/go/sqlescape"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/sqlparser"
@@ -363,7 +367,10 @@ func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*q
func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor func(string) (*sqltypes.Result, error)) (*sqltypes.Result, error) {
// MakeRowTrusted is needed here because Proto3ToResult is not convenient.
- var before, after bool
+ var (
+ before, after bool
+ afterVals []sqltypes.Value
+ )
bindvars := make(map[string]*querypb.BindVariable, len(tp.Fields))
if rowChange.Before != nil {
before = true
@@ -377,24 +384,48 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun
}
}
if rowChange.After != nil {
+ jsonIndex := 0
after = true
- vals := sqltypes.MakeRowTrusted(tp.Fields, rowChange.After)
+ afterVals = sqltypes.MakeRowTrusted(tp.Fields, rowChange.After)
for i, field := range tp.Fields {
- var bindVar *querypb.BindVariable
- var newVal *sqltypes.Value
- var err error
+ var (
+ bindVar *querypb.BindVariable
+ newVal *sqltypes.Value
+ err error
+ )
if field.Type == querypb.Type_JSON {
- if vals[i].IsNull() { // An SQL NULL and not an actual JSON value
+ switch {
+ case afterVals[i].IsNull(): // An SQL NULL and not an actual JSON value
newVal = &sqltypes.NULL
- } else { // A JSON value (which may be a JSON null literal value)
- newVal, err = vjson.MarshalSQLValue(vals[i].Raw())
+ case rowChange.JsonPartialValues != nil && isBitSet(rowChange.JsonPartialValues.Cols, jsonIndex) &&
+ !slices.Equal(afterVals[i].Raw(), sqltypes.NullBytes):
+ // An SQL expression that can be converted to a JSON value such as JSON_INSERT().
+ // This occurs when using partial JSON values as a result of mysqld using
+ // binlog-row-value-options=PARTIAL_JSON.
+ if len(afterVals[i].Raw()) == 0 {
+ // If the JSON column was NOT updated then the JSON column is marked as
+ // partial and the diff is empty as a way to exclude it from the AFTER image.
+ // It still has the data bit set, however, even though it's not really
+ // present. So we have to account for this by unsetting the data bit so
+ // that the column's current JSON value is not lost.
+ setBit(rowChange.DataColumns.Cols, i, false)
+ newVal = ptr.Of(sqltypes.MakeTrusted(querypb.Type_EXPRESSION, nil))
+ } else {
+ escapedName := sqlescape.EscapeID(field.Name)
+ newVal = ptr.Of(sqltypes.MakeTrusted(querypb.Type_EXPRESSION, []byte(
+ fmt.Sprintf(afterVals[i].RawStr(), escapedName),
+ )))
+ }
+ default: // A JSON value (which may be a JSON null literal value)
+ newVal, err = vjson.MarshalSQLValue(afterVals[i].Raw())
if err != nil {
return nil, err
}
}
bindVar, err = tp.bindFieldVal(field, newVal)
+ jsonIndex++
} else {
- bindVar, err = tp.bindFieldVal(field, &vals[i])
+ bindVar, err = tp.bindFieldVal(field, &afterVals[i])
}
if err != nil {
return nil, err
@@ -404,7 +435,7 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun
}
switch {
case !before && after:
- // only apply inserts for rows whose primary keys are within the range of rows already copied
+ // Only apply inserts for rows whose primary keys are within the range of rows already copied.
if tp.isOutsidePKRange(bindvars, before, after, "insert") {
return nil, nil
}
@@ -444,6 +475,61 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun
if tp.isOutsidePKRange(bindvars, before, after, "insert") {
return nil, nil
}
+ if tp.isPartial(rowChange) {
+ // We need to use a combination of the values in the BEFORE and AFTER image to generate the
+ // new row.
+ jsonIndex := 0
+ for i, field := range tp.Fields {
+ if field.Type == querypb.Type_JSON && rowChange.JsonPartialValues != nil {
+ switch {
+ case !isBitSet(rowChange.JsonPartialValues.Cols, jsonIndex):
+ // We use the full AFTER value which we already have.
+ case len(afterVals[i].Raw()) == 0:
+ // If the JSON column was NOT updated then the JSON column is marked as partial
+ // and the diff is empty as a way to exclude it from the AFTER image. So we
+ // want to use the BEFORE image value.
+ beforeVal, err := vjson.MarshalSQLValue(bindvars["b_"+field.Name].Value)
+ if err != nil {
+ return nil, vterrors.Wrapf(err, "failed to convert JSON to SQL field value for %s.%s when building insert query",
+ tp.TargetName, field.Name)
+ }
+ bindvars["a_"+field.Name], err = tp.bindFieldVal(field, beforeVal)
+ if err != nil {
+ return nil, vterrors.Wrapf(err, "failed to bind field value for %s.%s when building insert query",
+ tp.TargetName, field.Name)
+ }
+ default:
+ // For JSON columns when binlog-row-value-options=PARTIAL_JSON is used and the
+ // column is marked as partial, we need to wrap the JSON diff function(s)
+ // around the BEFORE value.
+ diff := afterVals[i].RawStr()
+ beforeVal := bindvars["b_"+field.Name].Value
+ buf := bytes.Buffer{}
+ buf.Grow(len(beforeVal) + len(sqlparser.Utf8mb4Str) + 2) // +2 is for the enclosing quotes
+ buf.WriteString(sqlparser.Utf8mb4Str)
+ buf.WriteByte('\'')
+ buf.Write(beforeVal)
+ buf.WriteByte('\'')
+ newVal := sqltypes.MakeTrusted(querypb.Type_EXPRESSION, []byte(
+ fmt.Sprintf(diff, buf.String()),
+ ))
+ bv, err := tp.bindFieldVal(field, &newVal)
+ if err != nil {
+ return nil, vterrors.Wrapf(err, "failed to bind field value for %s.%s when building insert query",
+ tp.TargetName, field.Name)
+ }
+ bindvars["a_"+field.Name] = bv
+ }
+ jsonIndex++
+ continue
+ }
+ if !isBitSet(rowChange.DataColumns.Cols, i) {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL,
+ "binary log event missing a needed value for %s.%s due to not using binlog-row-image=FULL; you will need to re-run the workflow with binlog-row-image=FULL",
+ tp.TargetName, field.Name)
+ }
+ }
+ }
return execParsedQuery(tp.Insert, bindvars, executor)
}
// Unreachable.
@@ -540,11 +626,28 @@ func (tp *TablePlan) applyBulkInsertChanges(rowInserts []*binlogdatapb.RowChange
newStmt := true
for _, rowInsert := range rowInserts {
+ var (
+ err error
+ bindVar *querypb.BindVariable
+ )
rowValues := &strings.Builder{}
bindvars := make(map[string]*querypb.BindVariable, len(tp.Fields))
vals := sqltypes.MakeRowTrusted(tp.Fields, rowInsert.After)
for n, field := range tp.Fields {
- bindVar, err := tp.bindFieldVal(field, &vals[n])
+ if field.Type == querypb.Type_JSON {
+ var jsVal *sqltypes.Value
+ if vals[n].IsNull() { // An SQL NULL and not an actual JSON value
+ jsVal = &sqltypes.NULL
+ } else { // A JSON value (which may be a JSON null literal value)
+ jsVal, err = vjson.MarshalSQLValue(vals[n].Raw())
+ if err != nil {
+ return nil, err
+ }
+ }
+ bindVar, err = tp.bindFieldVal(field, jsVal)
+ } else {
+ bindVar, err = tp.bindFieldVal(field, &vals[n])
+ }
if err != nil {
return nil, err
}
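
To make the PARTIAL_JSON handling above concrete: the event carries a partial JSON column as a diff expression with a %s placeholder for the operand, which is what fmt.Sprintf(afterVals[i].RawStr(), ...) fills in. A small illustration with invented values:

package main

import (
	"fmt"

	"vitess.io/vitess/go/sqlescape"
)

func main() {
	// mysqld ships a partial JSON column as a diff expression with a %s
	// placeholder for the operand (values here are invented):
	diff := "JSON_INSERT(%s, _utf8mb4'$.color', CAST(JSON_QUOTE(_utf8mb4'red') as JSON))"

	// Update path: splice in the escaped column name.
	fmt.Println(fmt.Sprintf(diff, sqlescape.EscapeID("jd")))
	// JSON_INSERT(`jd`, _utf8mb4'$.color', CAST(JSON_QUOTE(_utf8mb4'red') as JSON))

	// PK-change insert path: splice in the quoted BEFORE image instead, so
	// the diff is applied to the old value of the row being re-inserted.
	before := `{"key1": "val1"}`
	fmt.Println(fmt.Sprintf(diff, "_utf8mb4'"+before+"'"))
}
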
diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go
index 85e0fd8e50f..bc3088bbb44 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go
@@ -26,7 +26,6 @@ import (
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
- vttablet "vitess.io/vitess/go/vt/vttablet/common"
)
// isBitSet returns true if the bit at index is set
@@ -36,14 +35,22 @@ func isBitSet(data []byte, index int) bool {
return data[byteIndex]&bitMask > 0
}
-func (tp *TablePlan) isPartial(rowChange *binlogdatapb.RowChange) bool {
- if (tp.WorkflowConfig.ExperimentalFlags /**/ & /**/ vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage) == 0 ||
- rowChange.DataColumns == nil ||
- rowChange.DataColumns.Count == 0 {
+func setBit(data []byte, index int, value bool) {
+ byteIndex := index / 8
+ bitMask := byte(1 << (uint(index) & 0x7))
+ if value {
+ data[byteIndex] |= bitMask
+ } else {
+ data[byteIndex] &= 0xff - bitMask
+ }
+}
+
+func (tp *TablePlan) isPartial(rowChange *binlogdatapb.RowChange) bool {
+ if rowChange == nil {
return false
}
- return true
+ return (rowChange.DataColumns != nil && rowChange.DataColumns.Count > 0) ||
+ (rowChange.JsonPartialValues != nil && rowChange.JsonPartialValues.Count > 0)
}
func (tpb *tablePlanBuilder) generatePartialValuesPart(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter, dataColumns *binlogdatapb.RowChange_Bitmap) *sqlparser.ParsedQuery {
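
The bitmap helpers above use the byte i/8, bit i%8 (LSB-first) layout of the binlog column bitmaps. A quick illustration against the patch's own isBitSet/setBit:

package vreplication

import "fmt"

// exampleBitmap shows the layout isBitSet and setBit operate on:
// column i lives in byte i/8 at bit i%8 (LSB first).
func exampleBitmap() {
	cols := []byte{0b00000101}     // bits 0 and 2 set
	fmt.Println(isBitSet(cols, 0)) // true
	fmt.Println(isBitSet(cols, 1)) // false
	setBit(cols, 2, false)         // clear column 2's bit
	fmt.Println(isBitSet(cols, 2)) // false
}
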
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
index 50d93e60e5a..1267ad5e20c 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go
@@ -1519,6 +1519,296 @@ func TestPlayerRowMove(t *testing.T) {
validateQueryCountStat(t, "replicate", 3)
}
+// TestPlayerPartialImages tests the behavior of the vplayer when modifying
+// a table with BLOB and JSON columns, including modifications to a row's
+// Primary Key, when we have partial binlog images, meaning that
+// binlog-row-image=NOBLOB and/or binlog-row-value-options=PARTIAL_JSON.
+func TestPlayerPartialImages(t *testing.T) {
+ if !runPartialJSONTest {
+ t.Skip("Skipping test as binlog_row_value_options=PARTIAL_JSON is not enabled")
+ }
+
+ defer deleteTablet(addTablet(100))
+ execStatements(t, []string{
+ "create table src (id int, jd json, bd blob, primary key(id))",
+ fmt.Sprintf("create table %s.dst (id int, jd json, bd blob, primary key(id))", vrepldb),
+ })
+ defer execStatements(t, []string{
+ "drop table src",
+ fmt.Sprintf("drop table %s.dst", vrepldb),
+ })
+
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "dst",
+ Filter: "select * from src",
+ }},
+ }
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: env.KeyspaceName,
+ Shard: env.ShardName,
+ Filter: filter,
+ OnDdl: binlogdatapb.OnDDLAction_IGNORE,
+ }
+ cancel, _ := startVReplication(t, bls, "")
+ defer cancel()
+
+ type testCase struct {
+ input string
+ output []string
+ data [][]string
+ error string
+ }
+
+ var testCases []testCase
+
+ if vttablet.DefaultVReplicationConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching == 0 {
+ testCases = append(testCases, testCase{
+ input: "insert into src (id, jd, bd) values (1,'{\"key1\": \"val1\"}','blob data'), (2,'{\"key2\": \"val2\"}','blob data2'), (3,'{\"key3\": \"val3\"}','blob data3')",
+ output: []string{
+ "insert into dst(id,jd,bd) values (1,JSON_OBJECT(_utf8mb4'key1', _utf8mb4'val1'),_binary'blob data')",
+ "insert into dst(id,jd,bd) values (2,JSON_OBJECT(_utf8mb4'key2', _utf8mb4'val2'),_binary'blob data2')",
+ "insert into dst(id,jd,bd) values (3,JSON_OBJECT(_utf8mb4'key3', _utf8mb4'val3'),_binary'blob data3')",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\"}", "blob data"},
+ {"2", "{\"key2\": \"val2\"}", "blob data2"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ },
+ })
+ } else {
+ testCases = append(testCases, testCase{
+ input: "insert into src (id, jd, bd) values (1,'{\"key1\": \"val1\"}','blob data'), (2,'{\"key2\": \"val2\"}','blob data2'), (3,'{\"key3\": \"val3\"}','blob data3')",
+ output: []string{
+ "insert into dst(id,jd,bd) values (1,JSON_OBJECT(_utf8mb4'key1', _utf8mb4'val1'),_binary'blob data'), (2,JSON_OBJECT(_utf8mb4'key2', _utf8mb4'val2'),_binary'blob data2'), (3,JSON_OBJECT(_utf8mb4'key3', _utf8mb4'val3'),_binary'blob data3')",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\"}", "blob data"},
+ {"2", "{\"key2\": \"val2\"}", "blob data2"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ },
+ })
+ }
+ if runNoBlobTest {
+ testCases = append(testCases, testCase{
+ input: `update src set jd=JSON_SET(jd, '$.color', 'red') where id = 1`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.color', CAST(JSON_QUOTE(_utf8mb4'red') as JSON)) where id=1",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\"}", "blob data"},
+ {"2", "{\"key2\": \"val2\"}", "blob data2"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ },
+ })
+ } else {
+ testCases = append(testCases, testCase{
+ input: `update src set jd=JSON_SET(jd, '$.color', 'red') where id = 1`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.color', CAST(JSON_QUOTE(_utf8mb4'red') as JSON)), bd=_binary'blob data' where id=1",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\"}", "blob data"},
+ {"2", "{\"key2\": \"val2\"}", "blob data2"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ },
+ })
+ }
+ testCases = append(testCases, []testCase{
+ {
+ input: `update src set bd = 'new blob data' where id = 2`,
+ output: []string{
+ "update dst set bd=_binary'new blob data' where id=2",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\"}", "blob data"},
+ {"2", "{\"key2\": \"val2\"}", "new blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ },
+ },
+ {
+ input: `update src set id = id+10, bd = 'newest blob data' where id = 2`,
+ output: []string{
+ "delete from dst where id=2",
+ "insert into dst(id,jd,bd) values (12,JSON_OBJECT(_utf8mb4'key2', _utf8mb4'val2'),_binary'newest blob data')",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\"}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\"}", "newest blob data"},
+ },
+ },
+ }...)
+ if runNoBlobTest {
+ testCases = append(testCases, []testCase{
+ {
+ input: `update src set jd=JSON_SET(jd, '$.years', 5) where id = 1`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.years', CAST(5 as JSON)) where id=1",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\"}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.hobbies', JSON_ARRAY('skiing', 'video games', 'hiking')) where id = 1`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.hobbies', JSON_ARRAY(_utf8mb4'skiing', _utf8mb4'video games', _utf8mb4'hiking')) where id=1",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\"}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.misc', '{"address":"1012 S Park", "town":"Hastings", "state":"MI"}') where id = 12`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.misc', CAST(JSON_QUOTE(_utf8mb4'{\"address\":\"1012 S Park\", \"town\":\"Hastings\", \"state\":\"MI\"}') as JSON)) where id=12",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\"}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.current', true) where id = 12`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.current', CAST(_utf8mb4'true' as JSON)) where id=12",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\", \"current\": true}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.idontknow', null, '$.idontknoweither', 'null') where id = 3`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(JSON_INSERT(`jd`, _utf8mb4'$.idontknow', CAST(_utf8mb4'null' as JSON)), _utf8mb4'$.idontknoweither', CAST(JSON_QUOTE(_utf8mb4'null') as JSON)) where id=3",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\", \"idontknow\": null, \"idontknoweither\": \"null\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\", \"current\": true}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set id = id+10 where id = 3`,
+ error: "binary log event missing a needed value for dst.bd due to not using binlog-row-image=FULL",
+ },
+ }...)
+ } else {
+ testCases = append(testCases, []testCase{
+ {
+ input: `update src set jd=JSON_SET(jd, '$.years', 5) where id = 1`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.years', CAST(5 as JSON)), bd=_binary'blob data' where id=1",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\"}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.hobbies', JSON_ARRAY('skiing', 'video games', 'hiking')) where id = 1`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.hobbies', JSON_ARRAY(_utf8mb4'skiing', _utf8mb4'video games', _utf8mb4'hiking')), bd=_binary'blob data' where id=1",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\"}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.misc', '{"address":"1012 S Park", "town":"Hastings", "state":"MI"}') where id = 12`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.misc', CAST(JSON_QUOTE(_utf8mb4'{\"address\":\"1012 S Park\", \"town\":\"Hastings\", \"state\":\"MI\"}') as JSON)), bd=_binary'newest blob data' where id=12",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\"}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.current', true) where id = 12`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(`jd`, _utf8mb4'$.current', CAST(_utf8mb4'true' as JSON)), bd=_binary'newest blob data' where id=12",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\", \"current\": true}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set jd=JSON_SET(jd, '$.idontknow', null, '$.idontknoweither', 'null') where id = 3`,
+ output: []string{
+ "update dst set jd=JSON_INSERT(JSON_INSERT(`jd`, _utf8mb4'$.idontknow', CAST(_utf8mb4'null' as JSON)), _utf8mb4'$.idontknoweither', CAST(JSON_QUOTE(_utf8mb4'null') as JSON)), bd=_binary'blob data3' where id=3",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"3", "{\"key3\": \"val3\", \"idontknow\": null, \"idontknoweither\": \"null\"}", "blob data3"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\", \"current\": true}", "newest blob data"},
+ },
+ },
+ {
+ input: `update src set id = id+10 where id = 3`,
+ output: []string{
+ "delete from dst where id=3",
+ "insert into dst(id,jd,bd) values (13,JSON_OBJECT(_utf8mb4'idontknow', null, _utf8mb4'idontknoweither', _utf8mb4'null', _utf8mb4'key3', _utf8mb4'val3'),_binary'blob data3')",
+ },
+ data: [][]string{
+ {"1", "{\"key1\": \"val1\", \"color\": \"red\", \"years\": 5, \"hobbies\": [\"skiing\", \"video games\", \"hiking\"]}", "blob data"},
+ {"12", "{\"key2\": \"val2\", \"misc\": \"{\\\"address\\\":\\\"1012 S Park\\\", \\\"town\\\":\\\"Hastings\\\", \\\"state\\\":\\\"MI\\\"}\", \"current\": true}", "newest blob data"},
+ {"13", "{\"key3\": \"val3\", \"idontknow\": null, \"idontknoweither\": \"null\"}", "blob data3"},
+ },
+ },
+ }...)
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.input, func(t *testing.T) {
+ execStatements(t, []string{tc.input})
+ var want qh.ExpectationSequencer
+ if tc.error != "" {
+ if vttablet.DefaultVReplicationConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching == 0 {
+ want = qh.Expect(
+ "begin",
+ "delete from dst where id=3",
+ "rollback",
+ ).Then(qh.Immediately(
+ fmt.Sprintf("/update _vt.vreplication set message=.*%s.*", tc.error),
+ ))
+ } else {
+ want = qh.Expect(
+ "rollback",
+ ).Then(qh.Immediately(
+ fmt.Sprintf("/update _vt.vreplication set message=.*%s.*", tc.error),
+ ))
+ }
+ expectDBClientQueries(t, want)
+ } else {
+ want = qh.Expect(
+ "begin",
+ tc.output...,
+ ).Then(qh.Immediately(
+ "/update _vt.vreplication set pos=",
+ "commit",
+ ))
+ expectDBClientQueries(t, want)
+ expectData(t, "dst", tc.data)
+ }
+ })
+ }
+}
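The expected statements above show the shape a partial JSON value takes by the time it is applied: the vstreamer renders the binlog JSON diff as a JSON_[INSERT|REPLACE|REMOVE](...) expression with a %s placeholder for the column (see the JsonPartialValues comment in binlogdata.proto below), and the applier substitutes the destination column name. A minimal sketch of that substitution step, with illustrative names only:

package main

import "fmt"

func main() {
	// A partial JSON value as rendered from the binlog JSON diff; %s stands
	// in for the column name and must be filled in before the value is used.
	tmpl := "JSON_INSERT(%s, _utf8mb4'$.color', CAST(JSON_QUOTE(_utf8mb4'red') as JSON))"

	// The applier substitutes the actual destination column, yielding the
	// kind of UPDATE asserted in the test cases above.
	expr := fmt.Sprintf(tmpl, "`jd`")
	fmt.Printf("update dst set jd=%s where id=1\n", expr)
}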
+
func TestPlayerTypes(t *testing.T) {
defer deleteTablet(addTablet(100))
execStatements(t, []string{
@@ -1575,12 +1865,14 @@ func TestPlayerTypes(t *testing.T) {
}
cancel, _ := startVReplication(t, bls, "")
defer cancel()
+
type testcase struct {
input string
output string
table string
data [][]string
}
+
testcases := []testcase{{
input: "insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)",
output: "insert into vitess_ints(tiny,tinyu,small,smallu,medium,mediumu,normal,normalu,big,bigu,y) values (-128,255,-32768,65535,-8388608,16777215,-2147483648,4294967295,-9223372036854775808,18446744073709551615,2012)",
@@ -1653,15 +1945,30 @@ func TestPlayerTypes(t *testing.T) {
{"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`},
{"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`},
},
- }, {
- input: "update vitess_json set val1 = '{\"bar\": \"foo\"}', val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4) where id=1",
- output: "update vitess_json set val1=JSON_OBJECT(_utf8mb4'bar', _utf8mb4'foo'), val2=JSON_OBJECT(), val3=CAST(123 as JSON), val4=JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(98, 123)), val5=JSON_OBJECT() where id=1",
- table: "vitess_json",
- data: [][]string{
- {"1", `{"bar": "foo"}`, "{}", "123", `{"a": [98, 123]}`, `{}`},
- {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`},
- },
}}
+ if runPartialJSONTest {
+ // With partial JSON values, we don't replicate JSON columns that aren't
+ // actually updated.
+ testcases = append(testcases, testcase{
+ input: "update vitess_json set val1 = '{\"bar\": \"foo\"}', val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4) where id=1",
+ output: "update vitess_json set val1=JSON_OBJECT(_utf8mb4'bar', _utf8mb4'foo'), val4=JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(98, 123)), val5=JSON_OBJECT() where id=1",
+ table: "vitess_json",
+ data: [][]string{
+ {"1", `{"bar": "foo"}`, "{}", "123", `{"a": [98, 123]}`, `{}`},
+ {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`},
+ },
+ })
+ } else {
+ testcases = append(testcases, testcase{
+ input: "update vitess_json set val1 = '{\"bar\": \"foo\"}', val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4) where id=1",
+ output: "update vitess_json set val1=JSON_OBJECT(_utf8mb4'bar', _utf8mb4'foo'), val2=JSON_OBJECT(), val3=CAST(123 as JSON), val4=JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(98, 123)), val5=JSON_OBJECT() where id=1",
+ table: "vitess_json",
+ data: [][]string{
+ {"1", `{"bar": "foo"}`, "{}", "123", `{"a": [98, 123]}`, `{}`},
+ {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`},
+ },
+ })
+ }
for _, tcases := range testcases {
execStatements(t, []string{tcases.input})
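Whether the partial-JSON expectations apply depends on the MySQL server actually running with binlog-row-value-options=PARTIAL_JSON. A quick sanity check from Go, assuming an open *sql.DB against the test server (sketch; the helper name is illustrative):

import "database/sql"

// partialJSONEnabled reports whether the server has PARTIAL_JSON active.
// Sketch only: db is assumed to be an open connection to the test MySQL.
func partialJSONEnabled(db *sql.DB) (bool, error) {
	var opts string
	err := db.QueryRow("select @@global.binlog_row_value_options").Scan(&opts)
	return opts == "PARTIAL_JSON", err
}
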
diff --git a/go/vt/vttablet/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go
index cef0dd2baee..c4a4bef99fc 100644
--- a/go/vt/vttablet/tabletserver/controller.go
+++ b/go/vt/vttablet/tabletserver/controller.go
@@ -119,6 +119,9 @@ type Controller interface {
// WaitForPreparedTwoPCTransactions waits for all prepared transactions to be resolved.
WaitForPreparedTwoPCTransactions(ctx context.Context) error
+
+ // SetDemotePrimaryStalled marks the demote primary operation as stalled in the state manager.
+ SetDemotePrimaryStalled()
}
// Ensure TabletServer satisfies Controller interface.
diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go
index cae6a237dc8..4512b26f177 100644
--- a/go/vt/vttablet/tabletserver/state_manager.go
+++ b/go/vt/vttablet/tabletserver/state_manager.go
@@ -87,18 +87,19 @@ type stateManager struct {
//
// If a transition fails, we set retrying to true and launch
// retryTransition which loops until the state converges.
- mu sync.Mutex
- wantState servingState
- wantTabletType topodatapb.TabletType
- state servingState
- target *querypb.Target
- ptsTimestamp time.Time
- retrying bool
- replHealthy bool
- lameduck bool
- alsoAllow []topodatapb.TabletType
- reason string
- transitionErr error
+ mu sync.Mutex
+ wantState servingState
+ wantTabletType topodatapb.TabletType
+ state servingState
+ target *querypb.Target
+ ptsTimestamp time.Time
+ retrying bool
+ replHealthy bool
+ demotePrimaryStalled bool
+ lameduck bool
+ alsoAllow []topodatapb.TabletType
+ reason string
+ transitionErr error
rw *requestsWaiter
@@ -387,7 +388,7 @@ func (sm *stateManager) StartRequest(ctx context.Context, target *querypb.Target
sm.mu.Lock()
defer sm.mu.Unlock()
- if sm.state != StateServing || !sm.replHealthy {
+ if sm.state != StateServing || !sm.replHealthy || sm.demotePrimaryStalled {
// This specific error string needs to be returned for vtgate buffering to work.
return vterrors.New(vtrpcpb.Code_CLUSTER_EVENT, vterrors.NotServing)
}
@@ -715,6 +716,10 @@ func (sm *stateManager) Broadcast() {
defer sm.mu.Unlock()
lag, err := sm.refreshReplHealthLocked()
+ if sm.demotePrimaryStalled {
+ // If the demote primary operation is stalled, surface an error in the health stream.
+ err = vterrors.VT09031()
+ }
sm.hs.ChangeState(sm.target.TabletType, sm.ptsTimestamp, lag, err, sm.isServingLocked())
}
@@ -772,7 +777,7 @@ func (sm *stateManager) IsServing() bool {
}
func (sm *stateManager) isServingLocked() bool {
- return sm.state == StateServing && sm.wantState == StateServing && sm.replHealthy && !sm.lameduck
+ return sm.state == StateServing && sm.wantState == StateServing && sm.replHealthy && !sm.demotePrimaryStalled && !sm.lameduck
}
func (sm *stateManager) AppendDetails(details []*kv) []*kv {
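The exact error matters here: the comment in StartRequest notes that vtgate buffering keys off this specific NotServing error. A rough consumer-side sketch (vterrors.Code and Code_CLUSTER_EVENT are real Vitess identifiers; the surrounding logic is illustrative, the real buffering lives in vtgate):

if err := sm.StartRequest(ctx, target, false); err != nil {
	if vterrors.Code(err) == vtrpcpb.Code_CLUSTER_EVENT {
		// vtgate treats this as a cluster event and may buffer the
		// request instead of failing it outright.
	}
}
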
diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go
index df819c6f05c..f8059d6edea 100644
--- a/go/vt/vttablet/tabletserver/state_manager_test.go
+++ b/go/vt/vttablet/tabletserver/state_manager_test.go
@@ -669,6 +669,45 @@ func TestStateManagerNotify(t *testing.T) {
sm.StopService()
}
+func TestDemotePrimaryStalled(t *testing.T) {
+ sm := newTestStateManager()
+ defer sm.StopService()
+ err := sm.SetServingType(topodatapb.TabletType_PRIMARY, testNow, StateServing, "")
+ require.NoError(t, err)
+ // Stop the ticker so that we don't receive unexpected health-stream updates.
+ sm.hcticks.Stop()
+
+ ch := make(chan *querypb.StreamHealthResponse, 5)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := sm.hs.Stream(context.Background(), func(shr *querypb.StreamHealthResponse) error {
+ ch <- shr
+ return nil
+ })
+ assert.Contains(t, err.Error(), "tabletserver is shutdown")
+ }()
+ defer wg.Wait()
+
+ // Send a broadcast message and verify it carries no error.
+ sm.Broadcast()
+ gotshr := <-ch
+ require.Empty(t, gotshr.RealtimeStats.HealthError)
+
+ // Once demote primary is marked stalled, the health stream should report an error.
+ sm.demotePrimaryStalled = true
+ sm.Broadcast()
+ gotshr = <-ch
+ require.EqualValues(t, "VT09031: Primary demotion is stalled", gotshr.RealtimeStats.HealthError)
+ // Verify that we can't start a new request once demote primary is stalled.
+ err = sm.StartRequest(context.Background(), &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, false)
+ require.ErrorContains(t, err, "operation not allowed in state NOT_SERVING")
+
+ // Stop the state manager.
+ sm.StopService()
+}
+
func TestRefreshReplHealthLocked(t *testing.T) {
sm := newTestStateManager()
defer sm.StopService()
diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go
index 847de25eb02..deeac10bd05 100644
--- a/go/vt/vttablet/tabletserver/tabletserver.go
+++ b/go/vt/vttablet/tabletserver/tabletserver.go
@@ -758,6 +758,14 @@ func (tsv *TabletServer) WaitForPreparedTwoPCTransactions(ctx context.Context) e
}
}
+// SetDemotePrimaryStalled marks the demote primary operation as stalled in the state manager.
+func (tsv *TabletServer) SetDemotePrimaryStalled() {
+ tsv.sm.mu.Lock()
+ tsv.sm.demotePrimaryStalled = true
+ tsv.sm.mu.Unlock()
+ tsv.BroadcastHealth()
+}
+
// CreateTransaction creates the metadata for a 2PC transaction.
func (tsv *TabletServer) CreateTransaction(ctx context.Context, target *querypb.Target, dtid string, participants []*querypb.Target) (err error) {
return tsv.execRequest(
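This change only adds the hook; nothing in this hunk arms it. One plausible call site is a watchdog around the demote-primary flow, sketched here with hypothetical names (demoteTimeout and demotePrimary are not part of this change):

// Hypothetical sketch: mark the tablet stalled if demotion overruns.
timer := time.AfterFunc(demoteTimeout, func() {
	// Health streams then report VT09031 and StartRequest stops
	// admitting new requests (see state_manager.go above).
	tsv.SetDemotePrimaryStalled()
})
defer timer.Stop()
if err := demotePrimary(ctx); err != nil {
	return err
}
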
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
index 59db723ff2b..fb4cb324047 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go
@@ -623,7 +623,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit func(vev
if vevent != nil {
vevents = append(vevents, vevent)
}
- case ev.IsWriteRows() || ev.IsDeleteRows() || ev.IsUpdateRows():
+ case ev.IsWriteRows() || ev.IsDeleteRows() || ev.IsUpdateRows() || ev.IsPartialUpdateRows():
// The existence of before and after images can be used to
// identify statement types. It's also possible that the
// before and after images end up going to different shards.
@@ -973,7 +973,7 @@ func (vs *vstreamer) processJournalEvent(vevents []*binlogdatapb.VEvent, plan *s
}
nextrow:
for _, row := range rows.Rows {
- afterOK, afterValues, _, err := vs.extractRowAndFilter(plan, row.Data, rows.DataColumns, row.NullColumns)
+ afterOK, afterValues, _, err := vs.extractRowAndFilter(plan, row.Data, rows.DataColumns, row.NullColumns, row.JSONPartialValues)
if err != nil {
return nil, err
}
@@ -1011,11 +1011,14 @@ nextrow:
func (vs *vstreamer) processRowEvent(vevents []*binlogdatapb.VEvent, plan *streamerPlan, rows mysql.Rows) ([]*binlogdatapb.VEvent, error) {
rowChanges := make([]*binlogdatapb.RowChange, 0, len(rows.Rows))
for _, row := range rows.Rows {
- beforeOK, beforeValues, _, err := vs.extractRowAndFilter(plan, row.Identify, rows.IdentifyColumns, row.NullIdentifyColumns)
+ // The BEFORE image does not have partial JSON values, so we pass an empty bitmap.
+ beforeOK, beforeValues, _, err := vs.extractRowAndFilter(plan, row.Identify, rows.IdentifyColumns, row.NullIdentifyColumns, mysql.Bitmap{})
if err != nil {
return nil, err
}
- afterOK, afterValues, partial, err := vs.extractRowAndFilter(plan, row.Data, rows.DataColumns, row.NullColumns)
+ // The AFTER image is where we may have partial JSON values, as reflected in the
+ // row's JSONPartialValues bitmap.
+ afterOK, afterValues, partial, err := vs.extractRowAndFilter(plan, row.Data, rows.DataColumns, row.NullColumns, row.JSONPartialValues)
if err != nil {
return nil, err
}
@@ -1028,14 +1031,20 @@ func (vs *vstreamer) processRowEvent(vevents []*binlogdatapb.VEvent, plan *strea
}
if afterOK {
rowChange.After = sqltypes.RowToProto3(afterValues)
- if (vs.config.ExperimentalFlags /**/ & /**/ vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage != 0) &&
- partial {
+ if ((vs.config.ExperimentalFlags /**/ & /**/ vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage != 0) && partial) ||
+ (row.JSONPartialValues.Count() > 0) {
rowChange.DataColumns = &binlogdatapb.RowChange_Bitmap{
Count: int64(rows.DataColumns.Count()),
Cols: rows.DataColumns.Bits(),
}
}
+ if row.JSONPartialValues.Count() > 0 {
+ rowChange.JsonPartialValues = &binlogdatapb.RowChange_Bitmap{
+ Count: int64(row.JSONPartialValues.Count()),
+ Cols: row.JSONPartialValues.Bits(),
+ }
+ }
}
rowChanges = append(rowChanges, rowChange)
}
@@ -1081,13 +1090,14 @@ func (vs *vstreamer) rebuildPlans() error {
// - true, if row needs to be skipped because of workflow filter rules
// - data values, array of one value per column
// - true, if the row image was partial (i.e. binlog_row_image=noblob and dml doesn't update one or more blob/text columns)
-func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataColumns, nullColumns mysql.Bitmap) (bool, []sqltypes.Value, bool, error) {
+func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataColumns, nullColumns mysql.Bitmap, jsonPartialValues mysql.Bitmap) (bool, []sqltypes.Value, bool, error) {
if len(data) == 0 {
return false, nil, false, nil
}
values := make([]sqltypes.Value, dataColumns.Count())
charsets := make([]collations.ID, len(values))
valueIndex := 0
+ jsonIndex := 0
pos := 0
partial := false
for colNum := 0; colNum < dataColumns.Count(); colNum++ {
@@ -1101,9 +1111,17 @@ func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataCo
}
if nullColumns.Bit(valueIndex) {
valueIndex++
+ if plan.Table.Fields[colNum].Type == querypb.Type_JSON {
+ jsonIndex++
+ }
continue
}
- value, l, err := mysqlbinlog.CellValue(data, pos, plan.TableMap.Types[colNum], plan.TableMap.Metadata[colNum], plan.Table.Fields[colNum])
+ partialJSON := false
+ if jsonPartialValues.Count() > 0 && plan.Table.Fields[colNum].Type == querypb.Type_JSON {
+ partialJSON = jsonPartialValues.Bit(jsonIndex)
+ jsonIndex++
+ }
+ value, l, err := mysqlbinlog.CellValue(data, pos, plan.TableMap.Types[colNum], plan.TableMap.Metadata[colNum], plan.Table.Fields[colNum], partialJSON)
if err != nil {
log.Errorf("extractRowAndFilter: %s, table: %s, colNum: %d, fields: %+v, current values: %+v",
err, plan.Table.Name, colNum, plan.Table.Fields, values)
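The two indexes here move at different rates: valueIndex advances over every column present in the row image, while jsonIndex advances only over JSON columns (including NULL ones), because JSONPartialValues carries one bit per JSON column. A standalone sketch of that walk, with a toy bitmap standing in for mysql.Bitmap:

package main

import "fmt"

// toyBitmap stands in for mysql.Bitmap in this sketch.
type toyBitmap []bool

func (b toyBitmap) Bit(i int) bool { return i < len(b) && b[i] }

func main() {
	// Table (id INT, jd JSON, bd BLOB, je JSON): two JSON columns.
	isJSON := []bool{false, true, false, true}
	// One bit per JSON column, in column order: jd is partial, je is not.
	jsonPartial := toyBitmap{true, false}

	jsonIndex := 0
	for colNum := 0; colNum < len(isJSON); colNum++ {
		partialJSON := false
		if isJSON[colNum] {
			partialJSON = jsonPartial.Bit(jsonIndex)
			jsonIndex++ // advance even when the value is NULL or not partial
		}
		fmt.Printf("col %d: json=%v partial=%v\n", colNum, isJSON[colNum], partialJSON)
	}
}
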
diff --git a/go/vt/vttablet/tabletservermock/controller.go b/go/vt/vttablet/tabletservermock/controller.go
index 9d570b8f6c7..a5242751454 100644
--- a/go/vt/vttablet/tabletservermock/controller.go
+++ b/go/vt/vttablet/tabletservermock/controller.go
@@ -274,6 +274,11 @@ func (tqsc *Controller) WaitForPreparedTwoPCTransactions(context.Context) error
return nil
}
+// SetDemotePrimaryStalled is part of the tabletserver.Controller interface
+func (tqsc *Controller) SetDemotePrimaryStalled() {
+ tqsc.MethodCalled["SetDemotePrimaryStalled"] = true
+}
+
// EnterLameduck implements tabletserver.Controller.
func (tqsc *Controller) EnterLameduck() {
tqsc.mu.Lock()
diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto
index e1df792776b..3d55de7ea14 100644
--- a/proto/binlogdata.proto
+++ b/proto/binlogdata.proto
@@ -333,8 +333,17 @@ message RowChange {
}
query.Row before = 1;
query.Row after = 2;
- // DataColumns is a bitmap of all columns: bit is set if column is present in the after image
+ // DataColumns is a bitmap of all columns: bit is set if column is
+ // present in the after image.
Bitmap data_columns = 3;
+ // JsonPartialValues is a bitmap over the JSON columns, where a bit
+ // is set if the value in the AFTER image is a partial JSON value.
+ // A partial JSON value is represented as an expression of the form
+ // JSON_[INSERT|REPLACE|REMOVE](%s, '$.path', value), which adds,
+ // updates, or removes a path in the JSON document. When the value
+ // is used, the %s fmt directive must be replaced by the actual
+ // column name of the JSON field.
+ Bitmap json_partial_values = 4;
}
// RowEvent represent row events for one table.
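To make the bitmap sizing concrete: DataColumns counts all columns in the after image, while JsonPartialValues counts only the JSON columns. A sketch of a RowChange for a three-column table (id, jd JSON, bd BLOB) where jd carried a partial value; the `after` row is assumed, and the Cols byte layout is taken to follow mysql.Bitmap.Bits() (low bit first):

rowChange := &binlogdatapb.RowChange{
	After: after, // query.Row whose jd value is a JSON_* expression with %s
	DataColumns: &binlogdatapb.RowChange_Bitmap{
		Count: 3,                  // all columns present in the after image
		Cols:  []byte{0b00000111}, // id, jd, bd
	},
	JsonPartialValues: &binlogdatapb.RowChange_Bitmap{
		Count: 1,                  // one JSON column in the table
		Cols:  []byte{0b00000001}, // jd's value is partial
	},
}
_ = rowChange
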
diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go
index b24db1154fb..bf42825d73c 100644
--- a/test/ci_workflow_gen.go
+++ b/test/ci_workflow_gen.go
@@ -176,6 +176,7 @@ type clusterTest struct {
Docker bool
LimitResourceUsage bool
EnableBinlogTransactionCompression bool
+ EnablePartialJSON bool
PartialKeyspace bool
Cores16 bool
NeedsMinio bool
@@ -306,6 +307,7 @@ func generateClusterWorkflows(list []string, tpl string) {
}
if strings.Contains(cluster, "vrepl") {
test.EnableBinlogTransactionCompression = true
+ test.EnablePartialJSON = true
}
mysqlVersionIndicator := ""
if mysqlVersion != defaultMySQLVersion && len(clusterMySQLVersions()) > 1 {
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl
index 8d0a2f650b5..f51b06a2faf 100644
--- a/test/templates/cluster_endtoend_test.tpl
+++ b/test/templates/cluster_endtoend_test.tpl
@@ -224,6 +224,12 @@ jobs:
EOF
{{end}}
+ {{if .EnablePartialJSON}}
+ cat <<-EOF>>./config/mycnf/mysql8026.cnf
+ binlog-row-value-options=PARTIAL_JSON
+ EOF
+ {{end}}
+
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}}{{if .BuildTag}} -build-tag={{.BuildTag}} {{end}} | tee -a output.txt | go-junit-report -set-exit-code > report.xml
diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl
index 3704aebac4e..f802ee7ad4a 100644
--- a/test/templates/unit_test.tpl
+++ b/test/templates/unit_test.tpl
@@ -177,6 +177,9 @@ jobs:
export NOVTADMINBUILD=1
export VTEVALENGINETEST="{{.Evalengine}}"
+ # We sometimes need to alter the behavior based on the platform we're
+ # testing, e.g. MySQL 5.7 vs 8.0.
+ export CI_DB_PLATFORM="{{.Platform}}"
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
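This is presumably how gates like runPartialJSONTest and runNoBlobTest in the vplayer tests get their per-platform values. A hypothetical helper (CI_DB_PLATFORM is the real variable exported above; the function name and matching rule are assumptions):

import (
	"os"
	"strings"
)

// ciPlatformIsMySQL8 reports whether the CI platform is a MySQL 8.x flavor.
// PARTIAL_JSON requires MySQL 8.0, so the partial-JSON expectations only
// apply there. The exact platform strings are an assumption in this sketch.
func ciPlatformIsMySQL8() bool {
	return strings.HasPrefix(os.Getenv("CI_DB_PLATFORM"), "mysql8")
}
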
diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts
index 5be0e3ba031..adb02034c92 100644
--- a/web/vtadmin/src/proto/vtadmin.d.ts
+++ b/web/vtadmin/src/proto/vtadmin.d.ts
@@ -37394,6 +37394,9 @@ export namespace binlogdata {
/** RowChange data_columns */
data_columns?: (binlogdata.RowChange.IBitmap|null);
+
+ /** RowChange json_partial_values */
+ json_partial_values?: (binlogdata.RowChange.IBitmap|null);
}
/** Represents a RowChange. */
@@ -37414,6 +37417,9 @@ export namespace binlogdata {
/** RowChange data_columns. */
public data_columns?: (binlogdata.RowChange.IBitmap|null);
+ /** RowChange json_partial_values. */
+ public json_partial_values?: (binlogdata.RowChange.IBitmap|null);
+
/**
* Creates a new RowChange instance using the specified properties.
* @param [properties] Properties to set
diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js
index 11b70963fb0..d71d3e08fe5 100644
--- a/web/vtadmin/src/proto/vtadmin.js
+++ b/web/vtadmin/src/proto/vtadmin.js
@@ -87873,6 +87873,7 @@ export const binlogdata = $root.binlogdata = (() => {
* @property {query.IRow|null} [before] RowChange before
* @property {query.IRow|null} [after] RowChange after
* @property {binlogdata.RowChange.IBitmap|null} [data_columns] RowChange data_columns
+ * @property {binlogdata.RowChange.IBitmap|null} [json_partial_values] RowChange json_partial_values
*/
/**
@@ -87914,6 +87915,14 @@ export const binlogdata = $root.binlogdata = (() => {
*/
RowChange.prototype.data_columns = null;
+ /**
+ * RowChange json_partial_values.
+ * @member {binlogdata.RowChange.IBitmap|null|undefined} json_partial_values
+ * @memberof binlogdata.RowChange
+ * @instance
+ */
+ RowChange.prototype.json_partial_values = null;
+
/**
* Creates a new RowChange instance using the specified properties.
* @function create
@@ -87944,6 +87953,8 @@ export const binlogdata = $root.binlogdata = (() => {
$root.query.Row.encode(message.after, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
if (message.data_columns != null && Object.hasOwnProperty.call(message, "data_columns"))
$root.binlogdata.RowChange.Bitmap.encode(message.data_columns, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim();
+ if (message.json_partial_values != null && Object.hasOwnProperty.call(message, "json_partial_values"))
+ $root.binlogdata.RowChange.Bitmap.encode(message.json_partial_values, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim();
return writer;
};
@@ -87990,6 +88001,10 @@ export const binlogdata = $root.binlogdata = (() => {
message.data_columns = $root.binlogdata.RowChange.Bitmap.decode(reader, reader.uint32());
break;
}
+ case 4: {
+ message.json_partial_values = $root.binlogdata.RowChange.Bitmap.decode(reader, reader.uint32());
+ break;
+ }
default:
reader.skipType(tag & 7);
break;
@@ -88040,6 +88055,11 @@ export const binlogdata = $root.binlogdata = (() => {
if (error)
return "data_columns." + error;
}
+ if (message.json_partial_values != null && message.hasOwnProperty("json_partial_values")) {
+ let error = $root.binlogdata.RowChange.Bitmap.verify(message.json_partial_values);
+ if (error)
+ return "json_partial_values." + error;
+ }
return null;
};
@@ -88070,6 +88090,11 @@ export const binlogdata = $root.binlogdata = (() => {
throw TypeError(".binlogdata.RowChange.data_columns: object expected");
message.data_columns = $root.binlogdata.RowChange.Bitmap.fromObject(object.data_columns);
}
+ if (object.json_partial_values != null) {
+ if (typeof object.json_partial_values !== "object")
+ throw TypeError(".binlogdata.RowChange.json_partial_values: object expected");
+ message.json_partial_values = $root.binlogdata.RowChange.Bitmap.fromObject(object.json_partial_values);
+ }
return message;
};
@@ -88090,6 +88115,7 @@ export const binlogdata = $root.binlogdata = (() => {
object.before = null;
object.after = null;
object.data_columns = null;
+ object.json_partial_values = null;
}
if (message.before != null && message.hasOwnProperty("before"))
object.before = $root.query.Row.toObject(message.before, options);
@@ -88097,6 +88123,8 @@ export const binlogdata = $root.binlogdata = (() => {
object.after = $root.query.Row.toObject(message.after, options);
if (message.data_columns != null && message.hasOwnProperty("data_columns"))
object.data_columns = $root.binlogdata.RowChange.Bitmap.toObject(message.data_columns, options);
+ if (message.json_partial_values != null && message.hasOwnProperty("json_partial_values"))
+ object.json_partial_values = $root.binlogdata.RowChange.Bitmap.toObject(message.json_partial_values, options);
return object;
};